author    Raghavendra D Prabhu <[email protected]>    2011-03-12 18:54:47 (GMT)
committer Raghavendra D Prabhu <[email protected]>    2011-03-12 18:54:47 (GMT)
commit    5ec75ea891c9b44cdd2d7a3e0c29b4d08dff2b2a (patch)
tree      deabb15393558ab3a1a629fc0782d8b0c3124c48
Initial scripts update
Diffstat
-rw-r--r-- | .gitignore | 3
-rwxr-xr-x | :rbas | 6
-rwxr-xr-x | :uzbl-bookmark | 8
-rwxr-xr-x | :uzbl-history | 8
-rw-r--r-- | README | 1
-rwxr-xr-x | abs | 8
-rwxr-xr-x | ack | 2778
-rwxr-xr-x | acpihandler | 66
-rwxr-xr-x | agvim | 2
-rwxr-xr-x | alert | 11
-rwxr-xr-x | archievm | 83
-rwxr-xr-x | aria | 7
-rwxr-xr-x | aria2mon | 146
-rwxr-xr-x | aria2rpc | 361
-rwxr-xr-x | autorss.py | 60
-rwxr-xr-x | battery_mon | 5
-rwxr-xr-x | bldins | 176
-rwxr-xr-x | blogit | 13
-rwxr-xr-x | bloker | 50
-rwxr-xr-x | booktwit | 46
-rwxr-xr-x | brightness | 16
-rwxr-xr-x | browser | 53
-rwxr-xr-x | buildkernel | 35
-rwxr-xr-x | catc | 24
-rwxr-xr-x | checksec | 849
-rwxr-xr-x | clipbored | 186
-rwxr-xr-x | colorit | 30
-rwxr-xr-x | colortest | 365
-rwxr-xr-x | consume | 39
-rwxr-xr-x | corer | 28
-rwxr-xr-x | cpustat | 11
-rwxr-xr-x | current | 17
-rwxr-xr-x | dailyshow | 74
-rw-r--r-- | dbg-trace.sh | 79
-rwxr-xr-x | diditchange | 27
-rwxr-xr-x | dmenu_run | 4
-rwxr-xr-x | dmenuclip | 52
-rwxr-xr-x | dmenurl | 74
-rwxr-xr-x | down | 14
-rwxr-xr-x | download | 18
-rwxr-xr-x | downloadStats | 15
-rwxr-xr-x | fetch_poster.py | 322
-rwxr-xr-x | fincore | 347
-rwxr-xr-x | flplay | 12
-rwxr-xr-x | forward | 16
-rw-r--r-- | functions | 610
-rwxr-xr-x | fxr | 90
-rwxr-xr-x | getimap | 27
-rwxr-xr-x | gitprompt | 20
-rw-r--r-- | imdbpy.pyc | bin 0 -> 12024 bytes
-rwxr-xr-x | installkernel | 140
-rwxr-xr-x | jslint | 4271
-rwxr-xr-x | loadutil | 48
-rwxr-xr-x | mad | 71
-rwxr-xr-x | mailboxes | 43
-rwxr-xr-x | mailhops | 96
-rwxr-xr-x | mailto | 27
-rwxr-xr-x | mark-yank-urls | 310
-rwxr-xr-x | mmove | 15
-rwxr-xr-x | mnger | 40
-rwxr-xr-x | modprobe | 41
-rwxr-xr-x | moviemanager | 70
-rwxr-xr-x | mpdStats | 4
-rwxr-xr-x | mpdspl.py | 622
-rwxr-xr-x | mplayer | 195
l--------- | mplayeraux | 1
-rwxr-xr-x | muxSend | 17
-rwxr-xr-x | nbookmark | 9
-rwxr-xr-x | noter | 30
-rwxr-xr-x | notify-send | 137
l--------- | notify-send-aria2 | 1
l--------- | notify-send-beuter | 1
-rwxr-xr-x | otv | 14
-rwxr-xr-x | pacman-color | 4
-rwxr-xr-x | parallel | 5184
-rwxr-xr-x | parcelactions | 7
-rwxr-xr-x | plmpd | 102
-rwxr-xr-x | plocate | 11
-rwxr-xr-x | pmp | 17
-rwxr-xr-x | polipurge | 6
-rwxr-xr-x | port23 | 11
-rwxr-xr-x | postit.sh | 24
-rwxr-xr-x | posts.sh | 5
-rwxr-xr-x | prompt_git_info | 27
-rwxr-xr-x | purgepolipo | 6
-rwxr-xr-x | pytrends.py | 10
-rwxr-xr-x | qplay | 55
-rwxr-xr-x | r_handler | 10
-rwxr-xr-x | recordit | 3
-rwxr-xr-x | reminder | 31
-rwxr-xr-x | rssdownload | 45
-rwxr-xr-x | rtail | 26
-rwxr-xr-x | scratcher | 9
-rwxr-xr-x | search_mail | 4
-rwxr-xr-x | searchtwit.py | 35
-rwxr-xr-x | set_wall | 2
-rwxr-xr-x | shaper | 77
-rwxr-xr-x | shellrun | 7
-rwxr-xr-x | shotrss | 18
-rwxr-xr-x | speeddial | 14
-rwxr-xr-x | ssh-expect | 6
-rwxr-xr-x | suz | 66
-rwxr-xr-x | tagit | 12
-rwxr-xr-x | tmux-url | 28
-rwxr-xr-x | toggle | 4
-rwxr-xr-x | tpad | 7
-rwxr-xr-x | tubeplay | 8
-rwxr-xr-x | tux | 104
l--------- | ubrowser | 1
-rwxr-xr-x | unmount-removables | 35
-rwxr-xr-x | vimote | 67
-rwxr-xr-x | vimpager | 86
-rwxr-xr-x | vindex | 13
-rwxr-xr-x | vnews | 72
-rwxr-xr-x | volchange | 28
-rwxr-xr-x | w3m | 60
-rwxr-xr-x | wsync | 15
-rwxr-xr-x | yy | 6
-rwxr-xr-x | zshdb | 137
119 files changed, 20000 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..97453c8
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+.junk2/
+.junkyard/
+libexec/
diff --git a/:rbas b/:rbas
new file mode 100755
index 0000000..246b46b
--- /dev/null
+++ b/:rbas
@@ -0,0 +1,6 @@
+#!/bin/zsh -i
+#. ~/.bashrc
+#. ~/bin/functions
+#. ~/.bash_alias
+#result=`"$@"`
+"$@" | tr -d '*' | zenity --text-info --height 400 --width 680
diff --git a/:uzbl-bookmark b/:uzbl-bookmark
new file mode 100755
index 0000000..e6eea41
--- /dev/null
+++ b/:uzbl-bookmark
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+file=${XDG_DATA_HOME:-$HOME/.local/share}/uzbl/bookmarks
+[ -r "$file" ] || exit
+#DMENU="$HOME/bin/dmenu/dmenu -i -l 3 -nb #303030 -nf black -sb #303030 -p :"
+goto=`$DMENU < $file | awk '{print $1}'`
+
+[ -n "$goto" ] && uzbl-browser $goto
diff --git a/:uzbl-history b/:uzbl-history
new file mode 100755
index 0000000..f2448bc
--- /dev/null
+++ b/:uzbl-history
@@ -0,0 +1,8 @@
+#!/bin/sh
+history_file=${XDG_DATA_HOME:-$HOME/.local/share}/uzbl/history
+[ -r "$history_file" ] || exit 1
+
+#DMENU="$HOME/bin/dmenu/dmenu -i -l 3 -nb #303030 -nf black -sb #303030 -p :" # vertical patch
+goto=`tac $history_file | $DMENU $COLORS | cut -d ' ' -f -3 | awk '{print $NF}'`
+
+[ -n "$goto" ] && uzbl-browser $goto
diff --git a/README b/README
new file mode 100644
index 0000000..4ade000
--- /dev/null
+++ b/README
@@ -0,0 +1 @@
+Some of these scripts are mine; for the rest, the author is mentioned in the file.
diff --git a/abs b/abs
new file mode 100755
index 0000000..4d29f2e
--- /dev/null
+++ b/abs
@@ -0,0 +1,8 @@
+#!/bin/zsh
+pushd ~/Arch/repo/packages
+git pull
+pushd ~/Arch/repo/community
+git pull
+popd
+popd
+
diff --git a/ack b/ack
new file mode 100755
index 0000000..6c8f583
--- /dev/null
+++ b/ack
@@ -0,0 +1,2778 @@
+#!/usr/bin/env perl
+#
+# This file, ack, is generated code.
+# Please DO NOT EDIT or send patches for it.
+#
+# Please take a look at the source from
+# http://github.com/petdance/ack
+# and submit patches against the individual files
+# that build ack.
+#
+
+use warnings;
+use strict;
+
+our $VERSION = '1.94';
+# Check http://betterthangrep.com/ for updates
+
+# These are all our globals.
+
+
+MAIN: {
+ if ( $App::Ack::VERSION ne $main::VERSION ) {
+ App::Ack::die( "Program/library version mismatch\n\t$0 is $main::VERSION\n\t$INC{'App/Ack.pm'} is $App::Ack::VERSION" );
+ }
+
+ # Do preliminary arg checking;
+ my $env_is_usable = 1;
+ for ( @ARGV ) {
+ last if ( $_ eq '--' );
+
+ # Priorities! Get the --thpppt checking out of the way.
+ /^--th[pt]+t+$/ && App::Ack::_thpppt($_);
+
+ # See if we want to ignore the environment. (Don't tell Al Gore.)
+ if ( /^--(no)?env$/ ) {
+ $env_is_usable = defined $1 ? 0 : 1;
+ }
+ }
+ if ( $env_is_usable ) {
+ unshift( @ARGV, App::Ack::read_ackrc() );
+ }
+ else {
+ my @keys = ( 'ACKRC', grep { /^ACK_/ } keys %ENV );
+ delete @ENV{@keys};
+ }
+ App::Ack::load_colors();
+
+ if ( exists $ENV{ACK_SWITCHES} ) {
+ App::Ack::warn( 'ACK_SWITCHES is no longer supported. Use ACK_OPTIONS.' );
+ }
+
+    if ( !@ARGV ) {
+        App::Ack::show_help();
+        exit 1;
+    }
+
+ main();
+}
+
+sub main {
+ my $opt = App::Ack::get_command_line_options();
+
+ $| = 1 if $opt->{flush}; # Unbuffer the output if flush mode
+
+ if ( App::Ack::input_from_pipe() ) {
+ # We're going into filter mode
+ for ( qw( f g l ) ) {
+ $opt->{$_} and App::Ack::die( "Can't use -$_ when acting as a filter." );
+ }
+ $opt->{show_filename} = 0;
+ $opt->{regex} = App::Ack::build_regex( defined $opt->{regex} ? $opt->{regex} : shift @ARGV, $opt );
+ if ( my $nargs = @ARGV ) {
+ my $s = $nargs == 1 ? '' : 's';
+ App::Ack::warn( "Ignoring $nargs argument$s on the command-line while acting as a filter." );
+ }
+
+ my $res = App::Ack::Resource::Basic->new( '-' );
+ my $nmatches;
+ if ( $opt->{count} ) {
+ $nmatches = App::Ack::search_and_list( $res, $opt );
+ }
+ else {
+ # normal searching
+ $nmatches = App::Ack::search_resource( $res, $opt );
+ }
+ $res->close();
+ App::Ack::exit_from_ack( $nmatches );
+ }
+
+ my $file_matching = $opt->{f} || $opt->{lines};
+ if ( $file_matching ) {
+ App::Ack::die( "Can't specify both a regex ($opt->{regex}) and use one of --line, -f or -g." ) if $opt->{regex};
+ }
+ else {
+ $opt->{regex} = App::Ack::build_regex( defined $opt->{regex} ? $opt->{regex} : shift @ARGV, $opt );
+ }
+
+ # check that all regexes do compile fine
+ App::Ack::check_regex( $_ ) for ( $opt->{regex}, $opt->{G} );
+
+ my $what = App::Ack::get_starting_points( \@ARGV, $opt );
+ my $iter = App::Ack::get_iterator( $what, $opt );
+ App::Ack::filetype_setup();
+
+ my $nmatches = 0;
+
+ App::Ack::set_up_pager( $opt->{pager} ) if defined $opt->{pager};
+ if ( $opt->{f} ) {
+ $nmatches = App::Ack::print_files( $iter, $opt );
+ }
+ elsif ( $opt->{l} || $opt->{count} ) {
+ $nmatches = App::Ack::print_files_with_matches( $iter, $opt );
+ }
+ else {
+ $nmatches = App::Ack::print_matches( $iter, $opt );
+ }
+ close $App::Ack::fh;
+ App::Ack::exit_from_ack( $nmatches );
+}
+
+=head1 NAME
+
+ack - grep-like text finder
+
+=head1 SYNOPSIS
+
+ ack [options] PATTERN [FILE...]
+ ack -f [options] [DIRECTORY...]
+
+=head1 DESCRIPTION
+
+Ack is designed as a replacement for 99% of the uses of F<grep>.
+
+Ack searches the named input FILEs (or standard input if no files are
+named, or the file name - is given) for lines containing a match to the
+given PATTERN. By default, ack prints the matching lines.
+
+Ack can also list files that would be searched, without actually searching
+them, to let you take advantage of ack's file-type filtering capabilities.
+
+=head1 FILE SELECTION
+
+I<ack> is intelligent about the files it searches. It knows about
+certain file types, based on both the extension on the file and,
+in some cases, the contents of the file. These selections can be
+made with the B<--type> option.
+
+With no file selections, I<ack> only searches files of types that
+it recognizes. If you have a file called F<foo.wango>, and I<ack>
+doesn't know what a .wango file is, I<ack> won't search it.
+
+The B<-a> option tells I<ack> to select all files, regardless of
+type.
+
+Some files will never be selected by I<ack>, even with B<-a>,
+including:
+
+=over 4
+
+=item * Backup files: Files matching F<#*#> or ending with F<~>.
+
+=item * Coredumps: Files matching F<core.\d+>
+
+=back
+
+However, I<ack> always searches the files given on the command line,
+no matter what type. Furthermore, by specifying the B<-u> option all
+files will be searched.
+
+=head1 DIRECTORY SELECTION
+
+I<ack> descends through the directory tree of the starting directories
+specified. However, it will ignore the shadow directories used by
+many version control systems, and the build directories used by the
+Perl MakeMaker system. You may add or remove a directory from this
+list with the B<--[no]ignore-dir> option. The option may be repeated
+to add/remove multiple directories from the ignore list.
+
+For a complete list of directories that do not get searched, run
+F<ack --help>.
+
+=head1 WHEN TO USE GREP
+
+I<ack> trumps I<grep> as an everyday tool 99% of the time, but don't
+throw I<grep> away, because there are times you'll still need it.
+
+E.g., searching through huge files looking for regexes that can be
+expressed with I<grep> syntax should be quicker with I<grep>.
+
+If your script or parent program uses I<grep> C<--quiet> or
+C<--silent> or needs exit 2 on IO error, use I<grep>.
+
+=head1 OPTIONS
+
+=over 4
+
+=item B<-a>, B<--all>
+
+Operate on all files, regardless of type (but still skip directories
+like F<blib>, F<CVS>, etc.)
+
+=item B<-A I<NUM>>, B<--after-context=I<NUM>>
+
+Print I<NUM> lines of trailing context after matching lines.
+
+=item B<-B I<NUM>>, B<--before-context=I<NUM>>
+
+Print I<NUM> lines of leading context before matching lines.
+
+=item B<-C [I<NUM>]>, B<--context[=I<NUM>]>
+
+Print I<NUM> lines (default 2) of context around matching lines.
+
+=item B<-c>, B<--count>
+
+Suppress normal output; instead print a count of matching lines for
+each input file. If B<-l> is in effect, it will only show the
+number of lines for each file that has lines matching. Without
+B<-l>, some line counts may be zeroes.
+
+If combined with B<-h> (B<--no-filename>) ack outputs only one total count.
+
+=item B<--color>, B<--nocolor>
+
+B<--color> highlights the matching text. B<--nocolor> suppresses
+the color. This is on by default unless the output is redirected.
+
+On Windows, this option is off by default unless the
+L<Win32::Console::ANSI> module is installed or the C<ACK_PAGER_COLOR>
+environment variable is used.
+
+=item B<--color-filename=I<color>>
+
+Sets the color to be used for filenames.
+
+=item B<--color-match=I<color>>
+
+Sets the color to be used for matches.
+
+=item B<--color-lineno=I<color>>
+
+Sets the color to be used for line numbers.
+
+=item B<--column>
+
+Show the column number of the first match. This is helpful for editors
+that can place your cursor at a given position.
+
+=item B<--env>, B<--noenv>
+
+B<--noenv> disables all environment processing. No F<.ackrc> is read
+and all environment variables are ignored. By default, F<ack> considers
+F<.ackrc> and settings in the environment.
+
+=item B<--flush>
+
+B<--flush> flushes output immediately, even when ack is used
+non-interactively (when output goes to a pipe or file). This is
+off by default.
+
+=item B<-f>
+
+Only print the files that would be searched, without actually doing
+any searching. PATTERN must not be specified, or it will be taken as
+a path to search.
+
+=item B<--follow>, B<--nofollow>
+
+Follow or don't follow symlinks, other than whatever starting files
+or directories were specified on the command line.
+
+This is off by default.
+
+=item B<-G I<REGEX>>
+
+Only paths matching I<REGEX> are included in the search. The entire
+path and filename are matched against I<REGEX>, and I<REGEX> is a
+Perl regular expression, not a shell glob.
+
+The options B<-i>, B<-w>, B<-v>, and B<-Q> do not apply to this I<REGEX>.
+
+=item B<-g I<REGEX>>
+
+Print files where the relative path + filename matches I<REGEX>. This option is
+a convenience shortcut for B<-f> B<-G I<REGEX>>.
+
+The options B<-i>, B<-w>, B<-v>, and B<-Q> do not apply to this I<REGEX>.
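+
+For example, one way to list all files whose names end in F<.pm> (the
+pattern here is only illustrative):
+
+    ack -g '\.pm$'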
+
+=item B<--group>, B<--nogroup>
+
+B<--group> groups matches by file name. This is the default when
+used interactively.
+
+B<--nogroup> prints one result per line, like grep. This is the default
+when output is redirected.
+
+=item B<-H>, B<--with-filename>
+
+Print the filename for each match.
+
+=item B<-h>, B<--no-filename>
+
+Suppress the prefixing of filenames on output when multiple files are
+searched.
+
+=item B<--help>
+
+Print a short help statement.
+
+=item B<-i>, B<--ignore-case>
+
+Ignore case in the search strings.
+
+This applies only to the PATTERN, not to the regexes given for the B<-g>
+and B<-G> options.
+
+=item B<--[no]ignore-dir=I<DIRNAME>>
+
+Ignore directory (as CVS, .svn, etc are ignored). May be used multiple times
+to ignore multiple directories. For example, mason users may wish to include
+B<--ignore-dir=data>. The B<--noignore-dir> option allows users to search
+directories which would normally be ignored (perhaps to research the contents
+of F<.svn/props> directories).
+
+The I<DIRNAME> must always be a simple directory name. Nested directories like
+F<foo/bar> are NOT supported. You would need to specify B<--ignore-dir=foo> and
+then no files from any foo directory are taken into account by ack unless given
+explicitly on the command line.
+
+=item B<--line=I<NUM>>
+
+Only print line I<NUM> of each file. Multiple lines can be given with multiple
+B<--line> options or as a comma separated list (B<--line=3,5,7>). B<--line=4-7>
+also works. The lines are always output in ascending order, no matter the
+order given on the command line.
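+
+For example, one way to print lines 1 through 3 and line 10 of every
+Perl file in the tree (the line numbers are only illustrative):
+
+    ack --line=1-3,10 --perl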
+
+=item B<-l>, B<--files-with-matches>
+
+Only print the filenames of matching files, instead of the matching text.
+
+=item B<-L>, B<--files-without-matches>
+
+Only print the filenames of files that do I<NOT> match. This is equivalent
+to specifying B<-l> and B<-v>.
+
+=item B<--match I<REGEX>>
+
+Specify the I<REGEX> explicitly. This is helpful if you don't want to put the
+regex as your first argument, e.g. when executing multiple searches over the
+same set of files.
+
+ # search for foo and bar in given files
+ ack file1 t/file* --match foo
+ ack file1 t/file* --match bar
+
+=item B<-m=I<NUM>>, B<--max-count=I<NUM>>
+
+Stop reading a file after I<NUM> matches.
+
+=item B<--man>
+
+Print this manual page.
+
+=item B<-n>, B<--no-recurse>
+
+No descending into subdirectories.
+
+=item B<-o>
+
+Show only the part of each line matching PATTERN (turns off text
+highlighting)
+
+=item B<--output=I<expr>>
+
+Output the evaluation of I<expr> for each line (turns off text
+highlighting)
+
+=item B<--pager=I<program>>
+
+Direct ack's output through I<program>. This can also be specified
+via the C<ACK_PAGER> and C<ACK_PAGER_COLOR> environment variables.
+
+Using --pager does not suppress grouping and coloring like piping
+output on the command-line does.
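+
+For example, one way to page results through C<less> while keeping
+ack's coloring (the pager command is only illustrative):
+
+    ack --pager='less -R' PATTERN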
+
+=item B<--passthru>
+
+Prints all lines, whether or not they match the expression. Highlighting
+will still work, though, so it can be used to highlight matches while
+still seeing the entire file, as in:
+
+ # Watch a log file, and highlight a certain IP address
+ $ tail -f ~/access.log | ack --passthru 123.45.67.89
+
+=item B<--print0>
+
+Only works in conjunction with -f, -g, -l or -c (filename output). The filenames
+are output separated with a null byte instead of the usual newline. This is
+helpful when dealing with filenames that contain whitespace, e.g.
+
+ # remove all files of type html
+ ack -f --html --print0 | xargs -0 rm -f
+
+=item B<-Q>, B<--literal>
+
+Quote all metacharacters in PATTERN; it is treated as a literal.
+
+This applies only to the PATTERN, not to the regexes given for the B<-g>
+and B<-G> options.
+
+=item B<-r>, B<-R>, B<--recurse>
+
+Recurse into sub-directories. This is the default and just here for
+compatibility with grep. You can also use it for turning B<--no-recurse> off.
+
+=item B<--smart-case>, B<--no-smart-case>
+
+Ignore case in the search strings if PATTERN contains no uppercase
+characters. This is similar to C<smartcase> in vim. This option is
+off by default.
+
+B<-i> always overrides this option.
+
+This applies only to the PATTERN, not to the regexes given for the
+B<-g> and B<-G> options.
+
+=item B<--sort-files>
+
+Sorts the found files lexically. Use this if you want your file
+listings to be deterministic between runs of I<ack>.
+
+=item B<--show-types>
+
+Outputs the filetypes that ack associates with each file.
+
+Works with B<-f> and B<-g> options.
+
+=item B<--thpppt>
+
+Display the all-important Bill The Cat logo. Note that the exact
+spelling of B<--thpppppt> is not important. It's checked against
+a regular expression.
+
+=item B<--type=TYPE>, B<--type=noTYPE>
+
+Specify the types of files to include or exclude from a search.
+TYPE is a filetype, like I<perl> or I<xml>. B<--type=perl> can
+also be specified as B<--perl>, and B<--type=noperl> can be done
+as B<--noperl>.
+
+If a file is of both type "foo" and "bar", specifying --foo and
+--nobar will exclude the file, because an exclusion takes precedence
+over an inclusion.
+
+Type specifications can be repeated and are ORed together.
+
+See I<ack --help=types> for a list of valid types.
+
+=item B<--type-add I<TYPE>=I<.EXTENSION>[,I<.EXT2>[,...]]>
+
+Files with the given EXTENSION(s) are recognized as being of (the
+existing) type TYPE. See also L</"Defining your own types">.
+
+
+=item B<--type-set I<TYPE>=I<.EXTENSION>[,I<.EXT2>[,...]]>
+
+Files with the given EXTENSION(s) are recognized as being of type
+TYPE. This replaces an existing definition for type TYPE. See also
+L</"Defining your own types">.
+
+=item B<-u>, B<--unrestricted>
+
+All files and directories (including blib/, core.*, ...) are searched,
+nothing is skipped. When both B<-u> and B<--ignore-dir> are used, the
+B<--ignore-dir> option has no effect.
+
+=item B<-v>, B<--invert-match>
+
+Invert match: select non-matching lines
+
+This applies only to the PATTERN, not to the regexes given for the B<-g>
+and B<-G> options.
+
+=item B<--version>
+
+Display version and copyright information.
+
+=item B<-w>, B<--word-regexp>
+
+Force PATTERN to match only whole words. The PATTERN is wrapped with
+C<\b> metacharacters.
+
+This applies only to the PATTERN, not to the regexes given for the B<-g>
+and B<-G> options.
+
+=item B<-1>
+
+Stops after reporting first match of any kind. This is different
+from B<--max-count=1> or B<-m1>, where only one match per file is
+shown. Also, B<-1> works with B<-f> and B<-g>, where B<-m> does
+not.
+
+=back
+
+=head1 THE .ackrc FILE
+
+The F<.ackrc> file contains command-line options that are prepended
+to the command line before processing. Multiple options may live
+on multiple lines. Lines beginning with a # are ignored. A F<.ackrc>
+might look like this:
+
+ # Always sort the files
+ --sort-files
+
+ # Always color, even if piping to another program
+ --color
+
+ # Use "less -r" as my pager
+ --pager=less -r
+
+Note that arguments with spaces in them do not need to be quoted,
+as they are not interpreted by the shell. Basically, each I<line>
+in the F<.ackrc> file is interpreted as one element of C<@ARGV>.
+
+F<ack> looks in your home directory for the F<.ackrc>. You can
+specify another location with the F<ACKRC> variable, below.
+
+If B<--noenv> is specified on the command line, the F<.ackrc> file
+is ignored.
+
+=head1 Defining your own types
+
+ack allows you to define your own types in addition to the predefined
+types. This is done with command line options that are best put into
+an F<.ackrc> file - then you do not have to define your types over and
+over again. In the following examples the options will always be shown
+on one command line so that they can be easily copy & pasted.
+
+I<ack --perl foo> searches for foo in all perl files. I<ack --help=types>
+tells you that perl files are files ending
+in .pl, .pm, .pod or .t. So what if you would like to include .xs
+files as well when searching for --perl files? I<ack --type-add perl=.xs --perl foo>
+does this for you. B<--type-add> appends
+additional extensions to an existing type.
+
+If you want to define a new type, or completely redefine an existing
+type, then use B<--type-set>. I<ack --type-set
+eiffel=.e,.eiffel> defines the type I<eiffel> to include files with
+the extensions .e or .eiffel. So to search for all eiffel files
+containing the word Bertrand use I<ack --type-set eiffel=.e,.eiffel --eiffel Bertrand>.
+As usual, you can also write B<--type=eiffel>
+instead of B<--eiffel>. Negation also works, so B<--noeiffel> excludes
+all eiffel files from a search. Redefining also works: I<ack --type-set cc=.c,.h>
+and I<.xs> files no longer belong to the type I<cc>.
+
+When defining your own types in the F<.ackrc> file you have to use
+the following:
+
+ --type-set=eiffel=.e,.eiffel
+
+or writing on separate lines
+
+ --type-set
+ eiffel=.e,.eiffel
+
+The following does B<NOT> work in the F<.ackrc> file:
+
+ --type-set eiffel=.e,.eiffel
+
+
+In order to see all currently defined types, use I<--help types>, e.g.
+I<ack --type-set backup=.bak --type-add perl=.perl --help types>
+
+Restrictions:
+
+=over 4
+
+=item
+
+The types 'skipped', 'make', 'binary' and 'text' are considered "builtin" and
+cannot be altered.
+
+=item
+
+The shebang line recognition of the types 'perl', 'ruby', 'php', 'python',
+'shell' and 'xml' cannot be redefined by I<--type-set>, it is always
+active. However, the shebang line is only examined for files where the
+extension is not recognised. Therefore it is possible to say
+I<ack --type-set perl=.perl --type-set foo=.pl,.pm,.pod,.t --perl --nofoo> and
+only find your shiny new I<.perl> files (and all files with unrecognized extension
+and perl on the shebang line).
+
+=back
+
+=head1 ENVIRONMENT VARIABLES
+
+For commonly-used ack options, environment variables can make life much easier.
+These variables are ignored if B<--noenv> is specified on the command line.
+
+=over 4
+
+=item ACKRC
+
+Specifies the location of the F<.ackrc> file. If this file doesn't
+exist, F<ack> looks in the default location.
+
+=item ACK_OPTIONS
+
+This variable specifies default options to be placed in front of
+any explicit options on the command line.
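+
+For example, a shell startup file might contain something like the
+following (the options shown are only illustrative):
+
+    export ACK_OPTIONS="--smart-case --sort-files"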
+
+=item ACK_COLOR_FILENAME
+
+Specifies the color of the filename when it's printed in B<--group>
+mode. By default, it's "bold green".
+
+The recognized attributes are clear, reset, dark, bold, underline,
+underscore, blink, reverse, concealed, black, red, green, yellow,
+blue, magenta, on_black, on_red, on_green, on_yellow, on_blue,
+on_magenta, on_cyan, and on_white. Case is not significant.
+Underline and underscore are equivalent, as are clear and reset.
+The color alone sets the foreground color, and on_color sets the
+background color.
+
+This option can also be set with B<--color-filename>.
+
+=item ACK_COLOR_MATCH
+
+Specifies the color of the matching text when printed in B<--color>
+mode. By default, it's "black on_yellow".
+
+This option can also be set with B<--color-match>.
+
+See B<ACK_COLOR_FILENAME> for the color specifications.
+
+=item ACK_COLOR_LINENO
+
+Specifies the color of the line number when printed in B<--color>
+mode. By default, it's "bold yellow".
+
+This option can also be set with B<--color-lineno>.
+
+See B<ACK_COLOR_FILENAME> for the color specifications.
+
+=item ACK_PAGER
+
+Specifies a pager program, such as C<more>, C<less> or C<most>, to which
+ack will send its output.
+
+Using C<ACK_PAGER> does not suppress grouping and coloring like
+piping output on the command-line does, except that on Windows
+ack will assume that C<ACK_PAGER> does not support color.
+
+C<ACK_PAGER_COLOR> overrides C<ACK_PAGER> if both are specified.
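+
+For example, one way to set it from a POSIX shell (the pager shown is
+only illustrative):
+
+    export ACK_PAGER='less -r'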
+
+=item ACK_PAGER_COLOR
+
+Specifies a pager program that understands ANSI color sequences.
+Using C<ACK_PAGER_COLOR> does not suppress grouping and coloring
+like piping output on the command-line does.
+
+If you are not on Windows, you never need to use C<ACK_PAGER_COLOR>.
+
+=back
+
+=head1 ACK & OTHER TOOLS
+
+=head2 Vim integration
+
+F<ack> integrates easily with the Vim text editor. Set this in your
+F<.vimrc> to use F<ack> instead of F<grep>:
+
+ set grepprg=ack\ -a
+
+That example uses C<-a> to search through all files, but you may
+use other default flags. Now you can search with F<ack> and easily
+step through the results in Vim:
+
+ :grep Dumper perllib
+
+=head2 Emacs integration
+
+Phil Jackson put together an F<ack.el> extension that "provides a
+simple compilation mode ... has the ability to guess what files you
+want to search for based on the major-mode."
+
+L<http://www.shellarchive.co.uk/content/emacs.html>
+
+=head2 TextMate integration
+
+Pedro Melo is a TextMate user who writes "I spend my day mostly
+inside TextMate, and the built-in find-in-project sucks with large
+projects. So I hacked a TextMate command that was using find +
+grep to use ack. The result is the Search in Project with ack, and
+you can find it here:
+L<http://www.simplicidade.org/notes/archives/2008/03/search_in_proje.html>"
+
+=head2 Shell and Return Code
+
+For greater compatibility with I<grep>, I<ack> in normal use returns
+shell return or exit code of 0 only if something is found and 1 if
+no match is found.
+
+(Shell exit code 1 is C<$?=256> in perl with C<system> or backticks.)
+
+The I<grep> code 2 for errors is not used.
+
+If C<-f> or C<-g> are specified, then 0 is returned if at least one
+file is found. If no files are found, then 1 is returned.
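+
+A minimal illustration of these exit codes from the shell (the pattern
+and path are only illustrative):
+
+    ack foo lib/ && echo 'found a match' || echo 'no match'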
+
+=cut
+
+=head1 DEBUGGING ACK PROBLEMS
+
+If ack gives you output you're not expecting, start with a few simple steps.
+
+=head2 Use B<--noenv>
+
+Your environment variables and F<.ackrc> may be doing things you're
+not expecting, or that you've forgotten you specified. Use B<--noenv>
+to ignore your environment and F<.ackrc>.
+
+=head2 Use B<-f> to see what files you're scanning
+
+The reason I created B<-f> in the first place was as a debugging
+tool. If ack is not finding matches you think it should find, run
+F<ack -f> to see what files are being checked.
+
+=head1 TIPS
+
+=head2 Use the F<.ackrc> file.
+
+The F<.ackrc> is the place to put all your options you use most of
+the time but don't want to remember. Put all your --type-add and
+--type-set definitions in it. If you like --smart-case, set it
+there, too. I also set --sort-files there.
+
+=head2 Use F<-f> for working with big codesets
+
+Ack does more than search files. C<ack -f --perl> will create a
+list of all the Perl files in a tree, ideal for sending into F<xargs>.
+For example:
+
+ # Change all "this" to "that" in all Perl files in a tree.
+ ack -f --perl | xargs perl -p -i -e's/this/that/g'
+
+or if you prefer:
+
+ perl -p -i -e's/this/that/g' $(ack -f --perl)
+
+=head2 Use F<-Q> when in doubt about metacharacters
+
+If you're searching for something with a regular expression
+metacharacter, most often a period in a filename or IP address, add
+the -Q to avoid false positives without all the backslashing. See
+the following example for more...
+
+=head2 Use ack to watch log files
+
+Here's one I used the other day to find trouble spots for a website
+visitor. The user had a problem loading F<troublesome.gif>, so I
+took the access log and scanned it with ack twice.
+
+ ack -Q aa.bb.cc.dd /path/to/access.log | ack -Q -B5 troublesome.gif
+
+The first ack finds only the lines in the Apache log for the given
+IP. The second finds the match on my troublesome GIF, and shows
+the previous five lines from the log in each case.
+
+=head2 Share your knowledge
+
+Join the ack-users mailing list. Send me your tips and I may add
+them here.
+
+=head1 FAQ
+
+=head2 Why isn't ack finding a match in (some file)?
+
+Probably because it's of a type that ack doesn't recognize. ack's
+searching behavior is driven by filetype. B<If ack doesn't know
+what kind of file it is, ack ignores the file.>
+
+Use the C<-f> switch to see a list of files that ack will search
+for you.
+
+If you want ack to search files that it doesn't recognize, use the
+C<-a> switch.
+
+If you want ack to search every file, even ones that it always
+ignores like coredumps and backup files, use the C<-u> switch.
+
+=head2 Why does ack ignore unknown files by default?
+
+ack is designed by a programmer, for programmers, for searching
+large trees of code. Most codebases have a lot of files in them which
+aren't source files (like compiled object files, source control
+metadata, etc), and grep wastes a lot of time searching through all
+of those as well and returning matches from those files.
+
+That's why ack's behavior of not searching things it doesn't recognize
+is one of its greatest strengths: the speed you get from only
+searching the things that you want to be looking at.
+
+=head2 Wouldn't it be great if F<ack> did search & replace?
+
+No, ack will always be read-only. Perl has a perfectly good way
+to do search & replace in files, using the C<-i>, C<-p> and C<-n>
+switches.
+
+You can certainly use ack to select your files to update. For
+example, to change all "foo" to "bar" in all PHP files, you can do
+this from the Unix shell:
+
+ $ perl -i -p -e's/foo/bar/g' $(ack -f --php)
+
+=head2 Can you make ack recognize F<.xyz> files?
+
+That's an enhancement. Please see the section in the manual about
+enhancements.
+
+=head2 There's already a program/package called ack.
+
+Yes, I know.
+
+=head2 Why is it called ack if it's called ack-grep?
+
+The name of the program is "ack". Some packagers have called it
+"ack-grep" when creating packages because there's already a package
+out there called "ack" that has nothing to do with this ack.
+
+I suggest you make a symlink named F<ack> that points to F<ack-grep>
+because one of the crucial benefits of ack is having a name that's
+so short and simple to type.
+
+To do that, run this with F<sudo> or as root:
+
+ ln -s /usr/bin/ack-grep /usr/bin/ack
+
+=head2 What does F<ack> mean?
+
+Nothing. I wanted a name that was easy to type and that you could
+pronounce as a single syllable.
+
+=head2 Can I do multi-line regexes?
+
+No, ack does not support regexes that match multiple lines. Doing
+so would require reading in the entire file at a time.
+
+If you want to see lines near your match, use the C<-A>, C<-B>
+and C<-C> switches for displaying context.
+
+=head1 AUTHOR
+
+Andy Lester, C<< <andy at petdance.com> >>
+
+=head1 BUGS
+
+Please report any bugs or feature requests to the issues list at
+Github: L<http://github.com/petdance/ack/issues>
+
+=head1 ENHANCEMENTS
+
+All enhancement requests MUST first be posted to the ack-users
+mailing list at L<http://groups.google.com/group/ack-users>. I
+will not consider a request without it first getting seen by other
+ack users. This includes requests for new filetypes.
+
+There is a list of enhancements I want to make to F<ack> in the ack
+issues list at Github: L<http://github.com/petdance/ack/issues>
+
+Patches are always welcome, but patches with tests get the most
+attention.
+
+=head1 SUPPORT
+
+Support for and information about F<ack> can be found at:
+
+=over 4
+
+=item * The ack homepage
+
+L<http://betterthangrep.com/>
+
+=item * The ack issues list at Github
+
+L<http://github.com/petdance/ack/issues>
+
+=item * AnnoCPAN: Annotated CPAN documentation
+
+L<http://annocpan.org/dist/ack>
+
+=item * CPAN Ratings
+
+L<http://cpanratings.perl.org/d/ack>
+
+=item * Search CPAN
+
+L<http://search.cpan.org/dist/ack>
+
+=item * Git source repository
+
+L<http://github.com/petdance/ack>
+
+=back
+
+=head1 ACKNOWLEDGEMENTS
+
+How appropriate to have I<ack>nowledgements!
+
+Thanks to everyone who has contributed to ack in any way, including
+Nick Hooey,
+Bo Borgerson,
+Mark Szymanski,
+Marq Schneider,
+Packy Anderson,
+JR Boyens,
+Dan Sully,
+Ryan Niebur,
+Kent Fredric,
+Mike Morearty,
+Ingmar Vanhassel,
+Eric Van Dewoestine,
+Sitaram Chamarty,
+Adam James,
+Richard Carlsson,
+Pedro Melo,
+AJ Schuster,
+Phil Jackson,
+Michael Schwern,
+Jan Dubois,
+Christopher J. Madsen,
+Matthew Wickline,
+David Dyck,
+Jason Porritt,
+Jjgod Jiang,
+Thomas Klausner,
+Uri Guttman,
+Peter Lewis,
+Kevin Riggle,
+Ori Avtalion,
+Torsten Blix,
+Nigel Metheringham,
+GE<aacute>bor SzabE<oacute>,
+Tod Hagan,
+Michael Hendricks,
+E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason,
+Piers Cawley,
+Stephen Steneker,
+Elias Lutfallah,
+Mark Leighton Fisher,
+Matt Diephouse,
+Christian Jaeger,
+Bill Sully,
+Bill Ricker,
+David Golden,
+Nilson Santos F. Jr,
+Elliot Shank,
+Merijn Broeren,
+Uwe Voelker,
+Rick Scott,
+Ask BjE<oslash>rn Hansen,
+Jerry Gay,
+Will Coleda,
+Mike O'Regan,
+Slaven ReziE<0x107>,
+Mark Stosberg,
+David Alan Pisoni,
+Adriano Ferreira,
+James Keenan,
+Leland Johnson,
+Ricardo Signes
+and Pete Krawczyk.
+
+=head1 COPYRIGHT & LICENSE
+
+Copyright 2005-2010 Andy Lester.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the Artistic License v2.0.
+
+=cut
+package File::Next;
+
+use strict;
+use warnings;
+
+
+our $VERSION = '1.06';
+
+
+
+use File::Spec ();
+
+
+our $name; # name of the current file
+our $dir; # dir of the current file
+
+our %files_defaults;
+our %skip_dirs;
+
+BEGIN {
+ %files_defaults = (
+ file_filter => undef,
+ descend_filter => undef,
+ error_handler => sub { CORE::die @_ },
+ sort_files => undef,
+ follow_symlinks => 1,
+ );
+ %skip_dirs = map {($_,1)} (File::Spec->curdir, File::Spec->updir);
+}
+
+
+sub files {
+ ($_[0] eq __PACKAGE__) && die 'File::Next::files must not be invoked as File::Next->files';
+
+ my ($parms,@queue) = _setup( \%files_defaults, @_ );
+ my $filter = $parms->{file_filter};
+
+ return sub {
+ while (@queue) {
+ my ($dir,$file,$fullpath) = splice( @queue, 0, 3 );
+ if ( -f $fullpath ) {
+ if ( $filter ) {
+ local $_ = $file;
+ local $File::Next::dir = $dir;
+ local $File::Next::name = $fullpath;
+ next if not $filter->();
+ }
+ return wantarray ? ($dir,$file,$fullpath) : $fullpath;
+ }
+ elsif ( -d _ ) {
+ unshift( @queue, _candidate_files( $parms, $fullpath ) );
+ }
+ } # while
+
+ return;
+ }; # iterator
+}
+
+
+
+
+
+
+
+sub sort_standard($$) { return $_[0]->[1] cmp $_[1]->[1] }
+sub sort_reverse($$) { return $_[1]->[1] cmp $_[0]->[1] }
+
+sub reslash {
+ my $path = shift;
+
+ my @parts = split( /\//, $path );
+
+ return $path if @parts < 2;
+
+ return File::Spec->catfile( @parts );
+}
+
+
+
+sub _setup {
+ my $defaults = shift;
+ my $passed_parms = ref $_[0] eq 'HASH' ? {%{+shift}} : {}; # copy parm hash
+
+ my %passed_parms = %{$passed_parms};
+
+ my $parms = {};
+ for my $key ( keys %{$defaults} ) {
+ $parms->{$key} =
+ exists $passed_parms{$key}
+ ? delete $passed_parms{$key}
+ : $defaults->{$key};
+ }
+
+ # Any leftover keys are bogus
+ for my $badkey ( keys %passed_parms ) {
+ my $sub = (caller(1))[3];
+ $parms->{error_handler}->( "Invalid option passed to $sub(): $badkey" );
+ }
+
+ # If it's not a code ref, assume standard sort
+ if ( $parms->{sort_files} && ( ref($parms->{sort_files}) ne 'CODE' ) ) {
+ $parms->{sort_files} = \&sort_standard;
+ }
+ my @queue;
+
+ for ( @_ ) {
+ my $start = reslash( $_ );
+ if (-d $start) {
+ push @queue, ($start,undef,$start);
+ }
+ else {
+ push @queue, (undef,$start,$start);
+ }
+ }
+
+ return ($parms,@queue);
+}
+
+
+sub _candidate_files {
+ my $parms = shift;
+ my $dir = shift;
+
+ my $dh;
+ if ( !opendir $dh, $dir ) {
+ $parms->{error_handler}->( "$dir: $!" );
+ return;
+ }
+
+ my @newfiles;
+ my $descend_filter = $parms->{descend_filter};
+ my $follow_symlinks = $parms->{follow_symlinks};
+ my $sort_sub = $parms->{sort_files};
+
+ for my $file ( grep { !exists $skip_dirs{$_} } readdir $dh ) {
+ my $has_stat;
+
+ # Only do directory checking if we have a descend_filter
+ my $fullpath = File::Spec->catdir( $dir, $file );
+ if ( !$follow_symlinks ) {
+ next if -l $fullpath;
+ $has_stat = 1;
+ }
+
+ if ( $descend_filter ) {
+ if ( $has_stat ? (-d _) : (-d $fullpath) ) {
+ local $File::Next::dir = $fullpath;
+ local $_ = $file;
+ next if not $descend_filter->();
+ }
+ }
+ if ( $sort_sub ) {
+ push( @newfiles, [ $dir, $file, $fullpath ] );
+ }
+ else {
+ push( @newfiles, $dir, $file, $fullpath );
+ }
+ }
+ closedir $dh;
+
+ if ( $sort_sub ) {
+ return map { @{$_} } sort $sort_sub @newfiles;
+ }
+
+ return @newfiles;
+}
+
+
+1; # End of File::Next
+package App::Ack;
+
+use warnings;
+use strict;
+
+
+
+
+our $VERSION;
+our $COPYRIGHT;
+BEGIN {
+ $VERSION = '1.94';
+ $COPYRIGHT = 'Copyright 2005-2010 Andy Lester.';
+}
+
+our $fh;
+
+BEGIN {
+ $fh = *STDOUT;
+}
+
+
+our %types;
+our %type_wanted;
+our %mappings;
+our %ignore_dirs;
+
+our $input_from_pipe;
+our $output_to_pipe;
+
+our $dir_sep_chars;
+our $is_cygwin;
+our $is_windows;
+
+use File::Spec ();
+use File::Glob ':glob';
+use Getopt::Long ();
+
+BEGIN {
+ %ignore_dirs = (
+ '.bzr' => 'Bazaar',
+ '.cdv' => 'Codeville',
+ '~.dep' => 'Interface Builder',
+ '~.dot' => 'Interface Builder',
+ '~.nib' => 'Interface Builder',
+ '~.plst' => 'Interface Builder',
+ '.git' => 'Git',
+ '.hg' => 'Mercurial',
+ '.pc' => 'quilt',
+ '.svn' => 'Subversion',
+ _MTN => 'Monotone',
+ blib => 'Perl module building',
+ CVS => 'CVS',
+ RCS => 'RCS',
+ SCCS => 'SCCS',
+ _darcs => 'darcs',
+ _sgbak => 'Vault/Fortress',
+ 'autom4te.cache' => 'autoconf',
+ 'cover_db' => 'Devel::Cover',
+ _build => 'Module::Build',
+ );
+
+ %mappings = (
+ actionscript => [qw( as mxml )],
+ ada => [qw( ada adb ads )],
+ asm => [qw( asm s )],
+ batch => [qw( bat cmd )],
+ binary => q{Binary files, as defined by Perl's -B op (default: off)},
+ cc => [qw( c h xs )],
+ cfmx => [qw( cfc cfm cfml )],
+ clojure => [qw( clj )],
+ cpp => [qw( cpp cc cxx m hpp hh h hxx )],
+ csharp => [qw( cs )],
+ css => [qw( css )],
+ delphi => [qw( pas int dfm nfm dof dpk dproj groupproj bdsgroup bdsproj )],
+ elisp => [qw( el )],
+ erlang => [qw( erl hrl )],
+ fortran => [qw( f f77 f90 f95 f03 for ftn fpp )],
+ go => [qw( go )],
+ haskell => [qw( hs lhs )],
+ hh => [qw( h )],
+ html => [qw( htm html shtml xhtml )],
+ java => [qw( java properties )],
+ js => [qw( js )],
+ jsp => [qw( jsp jspx jhtm jhtml )],
+ lisp => [qw( lisp lsp )],
+ lua => [qw( lua )],
+ make => q{Makefiles (including *.mk and *.mak)},
+ mason => [qw( mas mhtml mpl mtxt )],
+ objc => [qw( m h )],
+ objcpp => [qw( mm h )],
+ ocaml => [qw( ml mli )],
+ parrot => [qw( pir pasm pmc ops pod pg tg )],
+ perl => [qw( pl pm pod t )],
+ php => [qw( php phpt php3 php4 php5 phtml)],
+ plone => [qw( pt cpt metadata cpy py )],
+ python => [qw( py )],
+ rake => q{Rakefiles},
+ ruby => [qw( rb rhtml rjs rxml erb rake spec )],
+ scala => [qw( scala )],
+ scheme => [qw( scm ss )],
+ shell => [qw( sh bash csh tcsh ksh zsh )],
+ skipped => q{Files, but not directories, normally skipped by ack (default: off)},
+ smalltalk => [qw( st )],
+ sql => [qw( sql ctl )],
+ tcl => [qw( tcl itcl itk )],
+ tex => [qw( tex cls sty )],
+ text => q{Text files, as defined by Perl's -T op (default: off)},
+ tt => [qw( tt tt2 ttml )],
+ vb => [qw( bas cls frm ctl vb resx )],
+ verilog => [qw( v vh sv )],
+ vhdl => [qw( vhd vhdl )],
+ vim => [qw( vim )],
+ yaml => [qw( yaml yml )],
+ xml => [qw( xml dtd xsl xslt ent )],
+ );
+
+ while ( my ($type,$exts) = each %mappings ) {
+ if ( ref $exts ) {
+ for my $ext ( @{$exts} ) {
+ push( @{$types{$ext}}, $type );
+ }
+ }
+ }
+ # add manually Makefile extensions
+ push @{$types{$_}}, 'make' for qw{ mk mak };
+
+ # These have to be checked before any filehandle diddling.
+ $output_to_pipe = not -t *STDOUT;
+ $input_from_pipe = -p STDIN;
+
+ $is_cygwin = ($^O eq 'cygwin');
+ $is_windows = ($^O =~ /MSWin32/);
+ $dir_sep_chars = $is_windows ? quotemeta( '\\/' ) : quotemeta( File::Spec->catfile( '', '' ) );
+}
+
+
+sub read_ackrc {
+ my @files = ( $ENV{ACKRC} );
+ my @dirs =
+ $is_windows
+ ? ( $ENV{HOME}, $ENV{USERPROFILE} )
+ : ( '~', $ENV{HOME} );
+ for my $dir ( grep { defined } @dirs ) {
+ for my $file ( '.ackrc', '_ackrc' ) {
+ push( @files, bsd_glob( "$dir/$file", GLOB_TILDE ) );
+ }
+ }
+ for my $filename ( @files ) {
+ if ( defined $filename && -e $filename ) {
+ open( my $fh, '<', $filename ) or App::Ack::die( "$filename: $!\n" );
+ my @lines = grep { /./ && !/^\s*#/ } <$fh>;
+ chomp @lines;
+ close $fh or App::Ack::die( "$filename: $!\n" );
+
+ # get rid of leading and trailing whitespaces
+ for ( @lines ) {
+ s/^\s+//;
+ s/\s+$//;
+ }
+
+ return @lines;
+ }
+ }
+
+ return;
+}
+
+
+sub get_command_line_options {
+ my %opt = (
+ pager => $ENV{ACK_PAGER_COLOR} || $ENV{ACK_PAGER},
+ );
+
+ my $getopt_specs = {
+ 1 => sub { $opt{1} = $opt{m} = 1 },
+ 'A|after-context=i' => \$opt{after_context},
+ 'B|before-context=i' => \$opt{before_context},
+ 'C|context:i' => sub { shift; my $val = shift; $opt{before_context} = $opt{after_context} = ($val || 2) },
+ 'a|all-types' => \$opt{all},
+ 'break!' => \$opt{break},
+ c => \$opt{count},
+ 'color|colour!' => \$opt{color},
+ 'color-match=s' => \$ENV{ACK_COLOR_MATCH},
+ 'color-filename=s' => \$ENV{ACK_COLOR_FILENAME},
+ 'color-lineno=s' => \$ENV{ACK_COLOR_LINENO},
+ 'column!' => \$opt{column},
+ count => \$opt{count},
+ 'env!' => sub { }, # ignore this option, it is handled beforehand
+ f => \$opt{f},
+ flush => \$opt{flush},
+ 'follow!' => \$opt{follow},
+ 'g=s' => sub { shift; $opt{G} = shift; $opt{f} = 1 },
+ 'G=s' => \$opt{G},
+ 'group!' => sub { shift; $opt{heading} = $opt{break} = shift },
+ 'heading!' => \$opt{heading},
+ 'h|no-filename' => \$opt{h},
+ 'H|with-filename' => \$opt{H},
+ 'i|ignore-case' => \$opt{i},
+ 'invert-file-match' => \$opt{invert_file_match},
+ 'lines=s' => sub { shift; my $val = shift; push @{$opt{lines}}, $val },
+ 'l|files-with-matches' => \$opt{l},
+ 'L|files-without-matches' => sub { $opt{l} = $opt{v} = 1 },
+ 'm|max-count=i' => \$opt{m},
+ 'match=s' => \$opt{regex},
+ 'n|no-recurse' => \$opt{n},
+ o => sub { $opt{output} = '$&' },
+ 'output=s' => \$opt{output},
+ 'pager=s' => \$opt{pager},
+ 'nopager' => sub { $opt{pager} = undef },
+ 'passthru' => \$opt{passthru},
+ 'print0' => \$opt{print0},
+ 'Q|literal' => \$opt{Q},
+ 'r|R|recurse' => sub { $opt{n} = 0 },
+ 'show-types' => \$opt{show_types},
+ 'smart-case!' => \$opt{smart_case},
+ 'sort-files' => \$opt{sort_files},
+ 'u|unrestricted' => \$opt{u},
+ 'v|invert-match' => \$opt{v},
+ 'w|word-regexp' => \$opt{w},
+
+ 'ignore-dirs=s' => sub { shift; my $dir = remove_dir_sep( shift ); $ignore_dirs{$dir} = '--ignore-dirs' },
+ 'noignore-dirs=s' => sub { shift; my $dir = remove_dir_sep( shift ); delete $ignore_dirs{$dir} },
+
+ 'version' => sub { print_version_statement(); exit; },
+ 'help|?:s' => sub { shift; show_help(@_); exit; },
+ 'help-types'=> sub { show_help_types(); exit; },
+ 'man' => sub {
+ require Pod::Usage;
+ Pod::Usage::pod2usage({
+ -verbose => 2,
+ -exitval => 0,
+ });
+ },
+
+ 'type=s' => sub {
+ # Whatever --type=xxx they specify, set it manually in the hash
+ my $dummy = shift;
+ my $type = shift;
+ my $wanted = ($type =~ s/^no//) ? 0 : 1; # must not be undef later
+
+ if ( exists $type_wanted{ $type } ) {
+ $type_wanted{ $type } = $wanted;
+ }
+ else {
+ App::Ack::die( qq{Unknown --type "$type"} );
+ }
+ }, # type sub
+ };
+
+ # Stick any default switches at the beginning, so they can be overridden
+ # by the command line switches.
+ unshift @ARGV, split( ' ', $ENV{ACK_OPTIONS} ) if defined $ENV{ACK_OPTIONS};
+
+ # first pass through options, looking for type definitions
+ def_types_from_ARGV();
+
+ for my $i ( filetypes_supported() ) {
+ $getopt_specs->{ "$i!" } = \$type_wanted{ $i };
+ }
+
+
+ my $parser = Getopt::Long::Parser->new();
+ $parser->configure( 'bundling', 'no_ignore_case', );
+ $parser->getoptions( %{$getopt_specs} ) or
+ App::Ack::die( 'See ack --help, ack --help-types or ack --man for options.' );
+
+ my $to_screen = not output_to_pipe();
+ my %defaults = (
+ all => 0,
+ color => $to_screen,
+ follow => 0,
+ break => $to_screen,
+ heading => $to_screen,
+ before_context => 0,
+ after_context => 0,
+ );
+ if ( $is_windows && $defaults{color} && not $ENV{ACK_PAGER_COLOR} ) {
+ if ( $ENV{ACK_PAGER} || not eval { require Win32::Console::ANSI } ) {
+ $defaults{color} = 0;
+ }
+ }
+ if ( $to_screen && $ENV{ACK_PAGER_COLOR} ) {
+ $defaults{color} = 1;
+ }
+
+ while ( my ($key,$value) = each %defaults ) {
+ if ( not defined $opt{$key} ) {
+ $opt{$key} = $value;
+ }
+ }
+
+ if ( defined $opt{m} && $opt{m} <= 0 ) {
+ App::Ack::die( '-m must be greater than zero' );
+ }
+
+ for ( qw( before_context after_context ) ) {
+ if ( defined $opt{$_} && $opt{$_} < 0 ) {
+ App::Ack::die( "--$_ may not be negative" );
+ }
+ }
+
+ if ( defined( my $val = $opt{output} ) ) {
+ $opt{output} = eval qq[ sub { "$val" } ];
+ }
+ if ( defined( my $l = $opt{lines} ) ) {
+ # --line=1 --line=5 is equivalent to --line=1,5
+ my @lines = split( /,/, join( ',', @{$l} ) );
+
+ # --line=1-3 is equivalent to --line=1,2,3
+ @lines = map {
+ my @ret;
+ if ( /-/ ) {
+ my ($from, $to) = split /-/, $_;
+ if ( $from > $to ) {
+ App::Ack::warn( "ignoring --line=$from-$to" );
+ @ret = ();
+ }
+ else {
+ @ret = ( $from .. $to );
+ }
+ }
+ else {
+ @ret = ( $_ );
+ };
+ @ret
+ } @lines;
+
+ if ( @lines ) {
+ my %uniq;
+ @uniq{ @lines } = ();
+ $opt{lines} = [ sort { $a <=> $b } keys %uniq ]; # numerical sort and each line occurs only once!
+ }
+ else {
+ # happens if there are only ignored --line directives
+ App::Ack::die( 'All --line options are invalid.' );
+ }
+ }
+
+ return \%opt;
+}
+
+
+sub def_types_from_ARGV {
+ my @typedef;
+
+ my $parser = Getopt::Long::Parser->new();
+ # pass_through => leave unrecognized command line arguments alone
+ # no_auto_abbrev => otherwise -c is expanded and not left alone
+ $parser->configure( 'no_ignore_case', 'pass_through', 'no_auto_abbrev' );
+ $parser->getoptions(
+ 'type-set=s' => sub { shift; push @typedef, ['c', shift] },
+ 'type-add=s' => sub { shift; push @typedef, ['a', shift] },
+ ) or App::Ack::die( 'See ack --help or ack --man for options.' );
+
+ for my $td (@typedef) {
+ my ($type, $ext) = split /=/, $td->[1];
+
+ if ( $td->[0] eq 'c' ) {
+ # type-set
+ if ( exists $mappings{$type} ) {
+ # can't redefine types 'make', 'skipped', 'text' and 'binary'
+ App::Ack::die( qq{--type-set: Builtin type "$type" cannot be changed.} )
+ if ref $mappings{$type} ne 'ARRAY';
+
+ delete_type($type);
+ }
+ }
+ else {
+ # type-add
+
+ # can't append to types 'make', 'skipped', 'text' and 'binary'
+ App::Ack::die( qq{--type-add: Builtin type "$type" cannot be changed.} )
+ if exists $mappings{$type} && ref $mappings{$type} ne 'ARRAY';
+
+ App::Ack::warn( qq{--type-add: Type "$type" does not exist, creating with "$ext" ...} )
+ unless exists $mappings{$type};
+ }
+
+ my @exts = split /,/, $ext;
+ s/^\.// for @exts;
+
+ if ( !exists $mappings{$type} || ref($mappings{$type}) eq 'ARRAY' ) {
+ push @{$mappings{$type}}, @exts;
+ for my $e ( @exts ) {
+ push @{$types{$e}}, $type;
+ }
+ }
+ else {
+ App::Ack::die( qq{Cannot append to type "$type".} );
+ }
+ }
+
+ return;
+}
+
+
+sub delete_type {
+ my $type = shift;
+
+ App::Ack::die( qq{Internal error: Cannot delete builtin type "$type".} )
+ unless ref $mappings{$type} eq 'ARRAY';
+
+ delete $mappings{$type};
+ delete $type_wanted{$type};
+ for my $ext ( keys %types ) {
+ $types{$ext} = [ grep { $_ ne $type } @{$types{$ext}} ];
+ }
+}
+
+
+sub ignoredir_filter {
+ return !exists $ignore_dirs{$_} && !exists $ignore_dirs{$File::Next::dir};
+}
+
+
+sub remove_dir_sep {
+ my $path = shift;
+ $path =~ s/[$dir_sep_chars]$//;
+
+ return $path;
+}
+
+
+use constant TEXT => 'text';
+
+sub filetypes {
+ my $filename = shift;
+
+ my $basename = $filename;
+ $basename =~ s{.*[$dir_sep_chars]}{};
+
+ return 'skipped' unless is_searchable( $basename );
+
+ my $lc_basename = lc $basename;
+ return ('make',TEXT) if $lc_basename eq 'makefile' || $lc_basename eq 'gnumakefile';
+ return ('rake','ruby',TEXT) if $lc_basename eq 'rakefile';
+
+ # If there's an extension, look it up
+ if ( $filename =~ m{\.([^\.$dir_sep_chars]+)$}o ) {
+ my $ref = $types{lc $1};
+ return (@{$ref},TEXT) if $ref;
+ }
+
+ # At this point, we can't tell from just the name. Now we have to
+ # open it and look inside.
+
+ return unless -e $filename;
+ # From Elliot Shank:
+ # I can't see any reason that -r would fail on these-- the ACLs look
+ # fine, and no program has any of them open, so the busted Windows
+ # file locking model isn't getting in there. If I comment the if
+ # statement out, everything works fine
+ # So, for cygwin, don't bother trying to check for readability.
+ if ( !$is_cygwin ) {
+ if ( !-r $filename ) {
+ App::Ack::warn( "$filename: Permission denied" );
+ return;
+ }
+ }
+
+ return 'binary' if -B $filename;
+
+ # If there's no extension, or we don't recognize it, check the shebang line
+ my $fh;
+ if ( !open( $fh, '<', $filename ) ) {
+ App::Ack::warn( "$filename: $!" );
+ return;
+ }
+ my $header = <$fh>;
+ close $fh;
+
+ if ( $header =~ /^#!/ ) {
+ return ($1,TEXT) if $header =~ /\b(ruby|p(?:erl|hp|ython))\b/;
+ return ('shell',TEXT) if $header =~ /\b(?:ba|t?c|k|z)?sh\b/;
+ }
+ else {
+ return ('xml',TEXT) if $header =~ /\Q<?xml /i;
+ }
+
+ return (TEXT);
+}
+
+
+sub is_searchable {
+ my $filename = shift;
+
+ # If these are updated, update the --help message
+ return if $filename =~ /[.]bak$/;
+ return if $filename =~ /~$/;
+ return if $filename =~ m{^#.*#$}o;
+ return if $filename =~ m{^core\.\d+$}o;
+ return if $filename =~ m{[._].*\.swp$}o;
+
+ return 1;
+}
+
+
+sub build_regex {
+ my $str = shift;
+ my $opt = shift;
+
+ defined $str or App::Ack::die( 'No regular expression found.' );
+
+ $str = quotemeta( $str ) if $opt->{Q};
+ if ( $opt->{w} ) {
+ $str = "\\b$str" if $str =~ /^\w/;
+ $str = "$str\\b" if $str =~ /\w$/;
+ }
+
+ my $regex_is_lc = $str eq lc $str;
+ if ( $opt->{i} || ($opt->{smart_case} && $regex_is_lc) ) {
+ $str = "(?i)$str";
+ }
+
+ return $str;
+}
+
+
+sub check_regex {
+ my $regex = shift;
+
+ return unless defined $regex;
+
+    eval { qr/$regex/ };
+    if ( $@ ) {
+        (my $error = $@) =~ s/ at \S+ line \d+.*//;
+        chomp($error);
+        App::Ack::die( "Invalid regex '$regex':\n  $error" );
+    }
+
+ return;
+}
+
+
+
+
+sub warn {
+ return CORE::warn( _my_program(), ': ', @_, "\n" );
+}
+
+
+sub die {
+ return CORE::die( _my_program(), ': ', @_, "\n" );
+}
+
+sub _my_program {
+ require File::Basename;
+ return File::Basename::basename( $0 );
+}
+
+
+
+sub filetypes_supported {
+ return keys %mappings;
+}
+
+sub _get_thpppt {
+ my $y = q{_ /|,\\'!.x',=(www)=, U };
+ $y =~ tr/,x!w/\nOo_/;
+ return $y;
+}
+
+sub _thpppt {
+ my $y = _get_thpppt();
+ App::Ack::print( "$y ack $_[0]!\n" );
+ exit 0;
+}
+
+sub _key {
+ my $str = lc shift;
+ $str =~ s/[^a-z]//g;
+
+ return $str;
+}
+
+
+sub show_help {
+ my $help_arg = shift || 0;
+
+ return show_help_types() if $help_arg =~ /^types?/;
+
+ my $ignore_dirs = _listify( sort { _key($a) cmp _key($b) } keys %ignore_dirs );
+
+ App::Ack::print( <<"END_OF_HELP" );
+Usage: ack [OPTION]... PATTERN [FILES]
+
+Search for PATTERN in each source file in the tree from cwd on down.
+If [FILES] is specified, then only those files/directories are checked.
+ack may also search STDIN, but only if no FILES are specified, or if
+one of FILES is "-".
+
+Default switches may be specified in ACK_OPTIONS environment variable or
+an .ackrc file. If you want no dependency on the environment, turn it
+off with --noenv.
+
+Example: ack -i select
+
+Searching:
+ -i, --ignore-case Ignore case distinctions in PATTERN
+ --[no]smart-case Ignore case distinctions in PATTERN,
+ only if PATTERN contains no upper case
+ Ignored if -i is specified
+ -v, --invert-match Invert match: select non-matching lines
+ -w, --word-regexp Force PATTERN to match only whole words
+ -Q, --literal Quote all metacharacters; PATTERN is literal
+
+Search output:
+ --line=NUM Only print line(s) NUM of each file
+ -l, --files-with-matches
+ Only print filenames containing matches
+ -L, --files-without-matches
+ Only print filenames with no matches
+ -o Show only the part of a line matching PATTERN
+ (turns off text highlighting)
+ --passthru Print all lines, whether matching or not
+ --output=expr Output the evaluation of expr for each line
+ (turns off text highlighting)
+ --match PATTERN Specify PATTERN explicitly.
+ -m, --max-count=NUM Stop searching in each file after NUM matches
+ -1 Stop searching after one match of any kind
+ -H, --with-filename Print the filename for each match
+ -h, --no-filename Suppress the prefixing filename on output
+ -c, --count Show number of lines matching per file
+ --column Show the column number of the first match
+
+ -A NUM, --after-context=NUM
+ Print NUM lines of trailing context after matching
+ lines.
+ -B NUM, --before-context=NUM
+ Print NUM lines of leading context before matching
+ lines.
+ -C [NUM], --context[=NUM]
+ Print NUM lines (default 2) of output context.
+
+ --print0 Print null byte as separator between filenames,
+ only works with -f, -g, -l, -L or -c.
+
+File presentation:
+ --pager=COMMAND Pipes all ack output through COMMAND. For example,
+ --pager="less -R". Ignored if output is redirected.
+ --nopager Do not send output through a pager. Cancels any
+ setting in ~/.ackrc, ACK_PAGER or ACK_PAGER_COLOR.
+ --[no]heading Print a filename heading above each file's results.
+ (default: on when used interactively)
+ --[no]break Print a break between results from different files.
+ (default: on when used interactively)
+ --group Same as --heading --break
+ --nogroup Same as --noheading --nobreak
+ --[no]color Highlight the matching text (default: on unless
+ output is redirected, or on Windows)
+ --[no]colour Same as --[no]color
+ --color-filename=COLOR
+ --color-match=COLOR
+ --color-lineno=COLOR Set the color for filenames, matches, and line numbers.
+ --flush Flush output immediately, even when ack is used
+ non-interactively (when output goes to a pipe or
+ file).
+
+File finding:
+ -f Only print the files found, without searching.
+ The PATTERN must not be specified.
+ -g REGEX Same as -f, but only print files matching REGEX.
+ --sort-files Sort the found files lexically.
+ --invert-file-match Print/search handle files that do not match -g/-G.
+ --show-types Show which types each file has.
+
+File inclusion/exclusion:
+ -a, --all-types All file types searched;
+ Ignores CVS, .svn and other ignored directories
+ -u, --unrestricted All files and directories searched
+ --[no]ignore-dir=name Add/Remove directory from the list of ignored dirs
+ -r, -R, --recurse Recurse into subdirectories (ack's default behavior)
+ -n, --no-recurse No descending into subdirectories
+ -G REGEX Only search files that match REGEX
+
+ --perl Include only Perl files.
+ --type=perl Include only Perl files.
+ --noperl Exclude Perl files.
+ --type=noperl Exclude Perl files.
+ See "ack --help type" for supported filetypes.
+
+ --type-set TYPE=.EXTENSION[,.EXT2[,...]]
+ Files with the given EXTENSION(s) are recognized as
+ being of type TYPE. This replaces an existing
+ definition for type TYPE.
+ --type-add TYPE=.EXTENSION[,.EXT2[,...]]
+ Files with the given EXTENSION(s) are recognized as
+ being of (the existing) type TYPE
+
+ --[no]follow Follow symlinks. Default is off.
+
+ Directories ignored by default:
+ $ignore_dirs
+
+ Files not checked for type:
+ /~\$/ - Unix backup files
+ /#.+#\$/ - Emacs swap files
+ /[._].*\\.swp\$/ - Vi(m) swap files
+ /core\\.\\d+\$/ - core dumps
+
+Miscellaneous:
+ --noenv Ignore environment variables and ~/.ackrc
+ --help This help
+ --man Man page
+ --version Display version & copyright
+ --thpppt Bill the Cat
+
+Exit status is 0 if match, 1 if no match.
+
+This is version $VERSION of ack.
+END_OF_HELP
+
+ return;
+ }
+
+
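+# Illustrative invocations, built only from the options documented in the help
+# text above (paths and patterns here are examples, not defaults):
+#   ack --perl -l foo lib/        # list Perl files under lib/ that contain "foo"
+#   ack --context=2 --nogroup foo # two lines of context, no per-file heading
+#   ack -f --perl                 # only list Perl files; -f takes no PATTERN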
+
+sub show_help_types {
+ App::Ack::print( <<'END_OF_HELP' );
+Usage: ack [OPTION]... PATTERN [FILES]
+
+The following is the list of filetypes supported by ack. You can
+specify a file type with the --type=TYPE format, or the --TYPE
+format. For example, both --type=perl and --perl work.
+
+Note that some extensions may appear in multiple types. For example,
+.pod files are both Perl and Parrot.
+
+END_OF_HELP
+
+ my @types = filetypes_supported();
+ my $maxlen = 0;
+ for ( @types ) {
+ $maxlen = length if $maxlen < length;
+ }
+ for my $type ( sort @types ) {
+ next if $type =~ /^-/; # Stuff to not show
+ my $ext_list = $mappings{$type};
+
+ if ( ref $ext_list ) {
+ $ext_list = join( ' ', map { ".$_" } @{$ext_list} );
+ }
+ App::Ack::print( sprintf( " --[no]%-*.*s %s\n", $maxlen, $maxlen, $type, $ext_list ) );
+ }
+
+ return;
+}
+
+sub _listify {
+ my @whats = @_;
+
+ return '' if !@whats;
+
+ my $end = pop @whats;
+ my $str = @whats ? join( ', ', @whats ) . " and $end" : $end;
+
+ no warnings 'once';
+ require Text::Wrap;
+ $Text::Wrap::columns = 75;
+ return Text::Wrap::wrap( '', ' ', $str );
+}
+
+
+sub get_version_statement {
+ require Config;
+
+ my $copyright = get_copyright();
+ my $this_perl = $Config::Config{perlpath};
+ if ($^O ne 'VMS') {
+ my $ext = $Config::Config{_exe};
+ $this_perl .= $ext unless $this_perl =~ m/$ext$/i;
+ }
+ my $ver = sprintf( '%vd', $^V );
+
+ return <<"END_OF_VERSION";
+ack $VERSION
+Running under Perl $ver at $this_perl
+
+$copyright
+
+This program is free software. You may modify or distribute it
+under the terms of the Artistic License v2.0.
+END_OF_VERSION
+}
+
+
+sub print_version_statement {
+ App::Ack::print( get_version_statement() );
+
+ return;
+}
+
+
+sub get_copyright {
+ return $COPYRIGHT;
+}
+
+
+sub load_colors {
+ eval 'use Term::ANSIColor ()';
+
+ $ENV{ACK_COLOR_MATCH} ||= 'black on_yellow';
+ $ENV{ACK_COLOR_FILENAME} ||= 'bold green';
+ $ENV{ACK_COLOR_LINENO} ||= 'bold yellow';
+
+ return;
+}
+
+
+sub is_interesting {
+ return if /^\./;
+
+ my $include;
+
+ for my $type ( filetypes( $File::Next::name ) ) {
+ if ( defined $type_wanted{$type} ) {
+ if ( $type_wanted{$type} ) {
+ $include = 1;
+ }
+ else {
+ return;
+ }
+ }
+ }
+
+ return $include;
+}
+
+
+
+# print subs added in order to make it easy for a third party
+# module (such as App::Wack) to redefine the display methods
+# and show the results in a different way.
+sub print { print {$fh} @_ }
+sub print_first_filename { App::Ack::print( $_[0], "\n" ) }
+sub print_blank_line { App::Ack::print( "\n" ) }
+sub print_separator { App::Ack::print( "--\n" ) }
+sub print_filename { App::Ack::print( $_[0], $_[1] ) }
+sub print_line_no { App::Ack::print( $_[0], $_[1] ) }
+sub print_column_no { App::Ack::print( $_[0], $_[1] ) }
+sub print_count {
+ my $filename = shift;
+ my $nmatches = shift;
+ my $ors = shift;
+ my $count = shift;
+ my $show_filename = shift;
+
+ if ($show_filename) {
+ App::Ack::print( $filename );
+ App::Ack::print( ':', $nmatches ) if $count;
+ }
+ else {
+ App::Ack::print( $nmatches ) if $count;
+ }
+ App::Ack::print( $ors );
+}
+
+sub print_count0 {
+ my $filename = shift;
+ my $ors = shift;
+ my $show_filename = shift;
+
+ if ($show_filename) {
+ App::Ack::print( $filename, ':0', $ors );
+ }
+ else {
+ App::Ack::print( '0', $ors );
+ }
+}
+
+
+
+{
+ my $filename;
+ my $regex;
+ my $display_filename;
+
+ my $keep_context;
+
+ my $last_output_line; # number of the last line that has been output
+ my $any_output; # has there been any output for the current file yet
+ my $context_overall_output_count; # has there been any output at all
+
+sub search_resource {
+ my $res = shift;
+ my $opt = shift;
+
+ $filename = $res->name();
+
+ my $v = $opt->{v};
+ my $passthru = $opt->{passthru};
+ my $max = $opt->{m};
+ my $nmatches = 0;
+
+ $display_filename = undef;
+
+ # for --line processing
+ my $has_lines = 0;
+ my @lines;
+ if ( defined $opt->{lines} ) {
+ $has_lines = 1;
+ @lines = ( @{$opt->{lines}}, -1 );
+ undef $regex; # Don't match when printing matching line
+ }
+ else {
+ $regex = qr/$opt->{regex}/;
+ }
+
+ # for context processing
+ $last_output_line = -1;
+ $any_output = 0;
+ my $before_context = $opt->{before_context};
+ my $after_context = $opt->{after_context};
+
+ $keep_context = ($before_context || $after_context) && !$passthru;
+
+ my @before;
+ my $before_starts_at_line;
+ my $after = 0; # number of lines still to print after a match
+
+ while ( $res->next_text ) {
+ # XXX Optimize away the case when there are no more @lines to find.
+ # XXX $has_lines, $passthru and $v never change. Optimize.
+ if ( $has_lines
+ ? $. != $lines[0] # $lines[0] should be a scalar
+ : $v ? m/$regex/ : !m/$regex/ ) {
+ if ( $passthru ) {
+ App::Ack::print( $_ );
+ next;
+ }
+
+ if ( $keep_context ) {
+ if ( $after ) {
+ print_match_or_context( $opt, 0, $., $-[0], $+[0], $_ );
+ $after--;
+ }
+ elsif ( $before_context ) {
+ if ( @before ) {
+ if ( @before >= $before_context ) {
+ shift @before;
+ ++$before_starts_at_line;
+ }
+ }
+ else {
+ $before_starts_at_line = $.;
+ }
+ push @before, $_;
+ }
+ last if $max && ( $nmatches >= $max ) && !$after;
+ }
+ next;
+ } # not a match
+
+ ++$nmatches;
+
+ # print an empty line as a divider before first line in each file (not before the first file)
+ if ( !$any_output && $opt->{show_filename} && $opt->{break} && defined( $context_overall_output_count ) ) {
+ App::Ack::print_blank_line();
+ }
+
+ shift @lines if $has_lines;
+
+ if ( $res->is_binary ) {
+ App::Ack::print( "Binary file $filename matches\n" );
+ last;
+ }
+ if ( $keep_context ) {
+ if ( @before ) {
+ print_match_or_context( $opt, 0, $before_starts_at_line, $-[0], $+[0], @before );
+ @before = ();
+ $before_starts_at_line = 0;
+ }
+ if ( $max && $nmatches > $max ) {
+ --$after;
+ }
+ else {
+ $after = $after_context;
+ }
+ }
+ print_match_or_context( $opt, 1, $., $-[0], $+[0], $_ );
+
+ last if $max && ( $nmatches >= $max ) && !$after;
+ } # while
+
+ return $nmatches;
+} # search_resource()
+
+
+
+sub print_match_or_context {
+ my $opt = shift; # opts array
+ my $is_match = shift; # is there a match on the line?
+ my $line_no = shift;
+ my $match_start = shift;
+ my $match_end = shift;
+
+ my $color = $opt->{color};
+ my $heading = $opt->{heading};
+ my $show_filename = $opt->{show_filename};
+ my $show_column = $opt->{column};
+
+ if ( $show_filename ) {
+ if ( not defined $display_filename ) {
+ $display_filename =
+ $color
+ ? Term::ANSIColor::colored( $filename, $ENV{ACK_COLOR_FILENAME} )
+ : $filename;
+ if ( $heading && !$any_output ) {
+ App::Ack::print_first_filename($display_filename);
+ }
+ }
+ }
+
+ my $sep = $is_match ? ':' : '-';
+ my $output_func = $opt->{output};
+ for ( @_ ) {
+ if ( $keep_context && !$output_func ) {
+ if ( ( $last_output_line != $line_no - 1 ) &&
+ ( $any_output || ( !$heading && defined( $context_overall_output_count ) ) ) ) {
+ App::Ack::print_separator();
+ }
+ # to ensure separators between different files when --noheading
+
+ $last_output_line = $line_no;
+ }
+
+ if ( $show_filename ) {
+ App::Ack::print_filename($display_filename, $sep) if not $heading;
+ my $display_line_no =
+ $color
+ ? Term::ANSIColor::colored( $line_no, $ENV{ACK_COLOR_LINENO} )
+ : $line_no;
+ App::Ack::print_line_no($display_line_no, $sep);
+ }
+
+ if ( $output_func ) {
+ while ( /$regex/go ) {
+ App::Ack::print( $output_func->() . "\n" );
+ }
+ }
+ else {
+ if ( $color && $is_match && $regex &&
+ s/$regex/Term::ANSIColor::colored( substr($_, $-[0], $+[0] - $-[0]), $ENV{ACK_COLOR_MATCH} )/eg ) {
+ # At the end of the line reset the color and remove newline
+ s/[\r\n]*\z/\e[0m\e[K/;
+ }
+ else {
+ # remove any kind of newline at the end of the line
+ s/[\r\n]*\z//;
+ }
+ if ( $show_column ) {
+ App::Ack::print_column_no( $match_start+1, $sep );
+ }
+ App::Ack::print($_ . "\n");
+ }
+ $any_output = 1;
+ ++$context_overall_output_count;
+ ++$line_no;
+ }
+
+ return;
+} # print_match_or_context()
+
+} # scope around search_resource() and print_match_or_context()
+
+
+TOTAL_COUNT_SCOPE: {
+my $total_count;
+
+sub get_total_count {
+ return $total_count;
+}
+
+sub reset_total_count {
+ $total_count = 0;
+}
+
+
+sub search_and_list {
+ my $res = shift;
+ my $opt = shift;
+
+ my $nmatches = 0;
+ my $count = $opt->{count};
+ my $ors = $opt->{print0} ? "\0" : "\n"; # output record separator
+ my $show_filename = $opt->{show_filename};
+
+ my $regex = qr/$opt->{regex}/;
+
+ if ( $opt->{v} ) {
+ while ( $res->next_text ) {
+ if ( /$regex/ ) {
+ return 0 unless $count;
+ }
+ else {
+ ++$nmatches;
+ }
+ }
+ }
+ else {
+ while ( $res->next_text ) {
+ if ( /$regex/ ) {
+ ++$nmatches;
+ last unless $count;
+ }
+ }
+ }
+
+ if ( $opt->{show_total} ) {
+ $total_count += $nmatches;
+ }
+ else {
+ if ( $nmatches ) {
+ App::Ack::print_count( $res->name, $nmatches, $ors, $count, $show_filename );
+ }
+ elsif ( $count && !$opt->{l} ) {
+ App::Ack::print_count0( $res->name, $ors, $show_filename );
+ }
+ }
+
+ return $nmatches ? 1 : 0;
+} # search_and_list()
+
+} # scope around $total_count
+
+
+
+sub filetypes_supported_set {
+ return grep { defined $type_wanted{$_} && ($type_wanted{$_} == 1) } filetypes_supported();
+}
+
+
+
+sub print_files {
+ my $iter = shift;
+ my $opt = shift;
+
+ my $ors = $opt->{print0} ? "\0" : "\n";
+
+ my $nmatches = 0;
+ while ( defined ( my $file = $iter->() ) ) {
+ App::Ack::print $file, $opt->{show_types} ? " => " . join( ',', filetypes( $file ) ) : (), $ors;
+ $nmatches++;
+ last if $opt->{1};
+ }
+
+ return $nmatches;
+}
+
+
+sub print_files_with_matches {
+ my $iter = shift;
+ my $opt = shift;
+
+ # if we have -l and only 1 file given on command line (this means
+ # show_filename is set to 0), we want to see the filename nevertheless
+ $opt->{show_filename} = 1 if $opt->{l};
+
+ $opt->{show_filename} = 0 if $opt->{h};
+ $opt->{show_filename} = 1 if $opt->{H};
+
+ # abuse options to hand in the show_total parameter to search_and_list
+ $opt->{show_total} = $opt->{count} && !$opt->{show_filename};
+ reset_total_count();
+
+ my $nmatches = 0;
+ while ( defined ( my $filename = $iter->() ) ) {
+ my $repo = App::Ack::Repository::Basic->new( $filename );
+ my $res;
+ while ( $res = $repo->next_resource() ) {
+ $nmatches += search_and_list( $res, $opt );
+ $res->close();
+ last if $nmatches && $opt->{1};
+ }
+ $repo->close();
+ }
+
+ if ( $nmatches && $opt->{show_total} ) {
+ App::Ack::print_count('', get_total_count(), "\n", 1, 0 )
+ }
+
+ return $nmatches;
+}
+
+
+sub print_matches {
+ my $iter = shift;
+ my $opt = shift;
+
+ $opt->{show_filename} = 0 if $opt->{h};
+ $opt->{show_filename} = 1 if $opt->{H};
+
+ my $nmatches = 0;
+ while ( defined ( my $filename = $iter->() ) ) {
+ my $repo;
+ my $tarballs_work = 0;
+ if ( $tarballs_work && $filename =~ /\.tar\.gz$/ ) {
+ App::Ack::die( 'Not working here yet' );
+ require App::Ack::Repository::Tar; # XXX Error checking
+ $repo = App::Ack::Repository::Tar->new( $filename );
+ }
+ else {
+ $repo = App::Ack::Repository::Basic->new( $filename );
+ }
+ $repo or next;
+
+ while ( my $res = $repo->next_resource() ) {
+ my $needs_line_scan;
+ if ( $opt->{regex} && !$opt->{passthru} ) {
+ $needs_line_scan = $res->needs_line_scan( $opt );
+ if ( $needs_line_scan ) {
+ $res->reset();
+ }
+ }
+ else {
+ $needs_line_scan = 1;
+ }
+ if ( $needs_line_scan ) {
+ $nmatches += search_resource( $res, $opt );
+ }
+ $res->close();
+ }
+ last if $nmatches && $opt->{1};
+ $repo->close();
+ }
+ return $nmatches;
+}
+
+
+sub filetype_setup {
+ my $filetypes_supported_set = filetypes_supported_set();
+ # If anyone says --no-whatever, we assume all other types must be on.
+ if ( !$filetypes_supported_set ) {
+ for my $i ( keys %type_wanted ) {
+ $type_wanted{$i} = 1 unless ( defined( $type_wanted{$i} ) || $i eq 'binary' || $i eq 'text' || $i eq 'skipped' );
+ }
+ }
+ return;
+}
+
+
+EXPAND_FILENAMES_SCOPE: {
+ my $filter;
+
+ sub expand_filenames {
+ my $argv = shift;
+
+ my $attr;
+ my @files;
+
+ foreach my $pattern ( @{$argv} ) {
+ my @results = bsd_glob( $pattern );
+
+ if (@results == 0) {
+ @results = $pattern; # Glob didn't match, pass it thru unchanged
+ }
+ elsif ( (@results > 1) or ($results[0] ne $pattern) ) {
+ if (not defined $filter) {
+ eval 'require Win32::File;';
+ if ($@) {
+ $filter = 0;
+ }
+ else {
+ $filter = Win32::File::HIDDEN()|Win32::File::SYSTEM();
+ }
+ } # end unless we've tried to load Win32::File
+ if ( $filter ) {
+ # Filter out hidden and system files:
+ @results = grep { not(Win32::File::GetAttributes($_, $attr) and $attr & $filter) } @results;
+ App::Ack::warn( "$pattern: Matched only hidden files" ) unless @results;
+ } # end if we can filter by file attributes
+ } # end elsif this pattern got expanded
+
+ push @files, @results;
+ } # end foreach pattern
+
+ return \@files;
+ } # end expand_filenames
+} # EXPAND_FILENAMES_SCOPE
+
+
+
+sub get_starting_points {
+ my $argv = shift;
+ my $opt = shift;
+
+ my @what;
+
+ if ( @{$argv} ) {
+ @what = @{ $is_windows ? expand_filenames($argv) : $argv };
+ $_ = File::Next::reslash( $_ ) for @what;
+
+ # Show filenames unless we've specified one single file
+ $opt->{show_filename} = (@what > 1) || (!-f $what[0]);
+ }
+ else {
+ @what = '.'; # Assume current directory
+ $opt->{show_filename} = 1;
+ }
+
+ for my $start_point (@what) {
+ App::Ack::warn( "$start_point: No such file or directory" ) unless -e $start_point;
+ }
+ return \@what;
+}
+
+sub _match {
+ my ( $target, $expression, $invert_flag ) = @_;
+
+ if ( $invert_flag ) {
+ return $target !~ $expression;
+ }
+ else {
+ return $target =~ $expression;
+ }
+}
+
+
+sub get_iterator {
+ my $what = shift;
+ my $opt = shift;
+
+ # Starting points are always searched, no matter what
+ my %starting_point = map { ($_ => 1) } @{$what};
+
+ my $g_regex = defined $opt->{G} ? qr/$opt->{G}/ : undef;
+ my $file_filter;
+
+ if ( $g_regex ) {
+ $file_filter
+ = $opt->{u} ? sub { _match( $File::Next::name, qr/$g_regex/, $opt->{invert_file_match} ) } # XXX Maybe this should be a 1, no?
+ : $opt->{all} ? sub { $starting_point{ $File::Next::name } || ( _match( $File::Next::name, qr/$g_regex/, $opt->{invert_file_match} ) && is_searchable( $_ ) ) }
+ : sub { $starting_point{ $File::Next::name } || ( _match( $File::Next::name, qr/$g_regex/, $opt->{invert_file_match} ) && is_interesting( @_ ) ) }
+ ;
+ }
+ else {
+ $file_filter
+ = $opt->{u} ? sub {1}
+ : $opt->{all} ? sub { $starting_point{ $File::Next::name } || is_searchable( $_ ) }
+ : sub { $starting_point{ $File::Next::name } || is_interesting( @_ ) }
+ ;
+ }
+
+ my $descend_filter
+ = $opt->{n} ? sub {0}
+ : $opt->{u} ? sub {1}
+ : \&ignoredir_filter;
+
+ my $iter =
+ File::Next::files( {
+ file_filter => $file_filter,
+ descend_filter => $descend_filter,
+ error_handler => sub { my $msg = shift; App::Ack::warn( $msg ) },
+ sort_files => $opt->{sort_files},
+ follow_symlinks => $opt->{follow},
+ }, @{$what} );
+ return $iter;
+}
+
+
+sub set_up_pager {
+ my $command = shift;
+
+ return if App::Ack::output_to_pipe();
+
+ my $pager;
+ if ( not open( $pager, '|-', $command ) ) {
+ App::Ack::die( qq{Unable to pipe to pager "$command": $!} );
+ }
+ $fh = $pager;
+
+ return;
+}
+
+
+sub input_from_pipe {
+ return $input_from_pipe;
+}
+
+
+
+sub output_to_pipe {
+ return $output_to_pipe;
+}
+
+
+sub exit_from_ack {
+ my $nmatches = shift;
+
+ my $rc = $nmatches ? 0 : 1;
+ exit $rc;
+}
+
+
+
+1; # End of App::Ack
+package App::Ack::Repository;
+
+
+use warnings;
+use strict;
+
+sub FAIL {
+ require Carp;
+ Carp::confess( 'Must be overloaded' );
+}
+
+
+sub new {
+ FAIL();
+}
+
+
+sub next_resource {
+ FAIL();
+}
+
+
+sub close {
+ FAIL();
+}
+
+1;
+package App::Ack::Resource;
+
+
+use warnings;
+use strict;
+
+sub FAIL {
+ require Carp;
+ Carp::confess( 'Must be overloaded' );
+}
+
+
+sub new {
+ FAIL();
+}
+
+
+sub name {
+ FAIL();
+}
+
+
+sub is_binary {
+ FAIL();
+}
+
+
+
+sub needs_line_scan {
+ FAIL();
+}
+
+
+sub reset {
+ FAIL();
+}
+
+
+sub next_text {
+ FAIL();
+}
+
+
+sub close {
+ FAIL();
+}
+
+1;
+package App::Ack::Plugin::Basic;
+
+
+
+package App::Ack::Resource::Basic;
+
+
+use warnings;
+use strict;
+
+
+our @ISA = qw( App::Ack::Resource );
+
+
+sub new {
+ my $class = shift;
+ my $filename = shift;
+
+ my $self = bless {
+ filename => $filename,
+ fh => undef,
+ could_be_binary => undef,
+ opened => undef,
+ id => undef,
+ }, $class;
+
+ if ( $self->{filename} eq '-' ) {
+ $self->{fh} = *STDIN;
+ $self->{could_be_binary} = 0;
+ }
+ else {
+ if ( !open( $self->{fh}, '<', $self->{filename} ) ) {
+ App::Ack::warn( "$self->{filename}: $!" );
+ return;
+ }
+ $self->{could_be_binary} = 1;
+ }
+
+ return $self;
+}
+
+
+sub name {
+ my $self = shift;
+
+ return $self->{filename};
+}
+
+
+sub is_binary {
+ my $self = shift;
+
+ if ( $self->{could_be_binary} ) {
+ return -B $self->{filename};
+ }
+
+ return 0;
+}
+
+
+
+sub needs_line_scan {
+ my $self = shift;
+ my $opt = shift;
+
+ return 1 if $opt->{v};
+
+ my $size = -s $self->{fh};
+ if ( $size == 0 ) {
+ return 0;
+ }
+ elsif ( $size > 100_000 ) {
+ return 1;
+ }
+
+ my $buffer;
+ my $rc = sysread( $self->{fh}, $buffer, $size );
+ if ( not defined $rc ) {
+ App::Ack::warn( "$self->{filename}: $!" );
+ return 1;
+ }
+ return 0 unless $rc && ( $rc == $size );
+
+ my $regex = $opt->{regex};
+ return $buffer =~ /$regex/m;
+}
+
+
+sub reset {
+ my $self = shift;
+
+ seek( $self->{fh}, 0, 0 )
+ or App::Ack::warn( "$self->{filename}: $!" );
+
+ return;
+}
+
+
+sub next_text {
+ if ( defined ($_ = readline $_[0]->{fh}) ) {
+ $. = ++$_[0]->{line};
+ return 1;
+ }
+
+ return;
+}
+
+
+sub close {
+ my $self = shift;
+
+ if ( not close $self->{fh} ) {
+ App::Ack::warn( $self->name() . ": $!" );
+ }
+
+ return;
+}
+
+package App::Ack::Repository::Basic;
+
+
+our @ISA = qw( App::Ack::Repository );
+
+
+use warnings;
+use strict;
+
+sub new {
+ my $class = shift;
+ my $filename = shift;
+
+ my $self = bless {
+ filename => $filename,
+ nexted => 0,
+ }, $class;
+
+ return $self;
+}
+
+
+sub next_resource {
+ my $self = shift;
+
+ return if $self->{nexted};
+ $self->{nexted} = 1;
+
+ return App::Ack::Resource::Basic->new( $self->{filename} );
+}
+
+
+sub close {
+}
+
+
+
+1;
diff --git a/acpihandler b/acpihandler
new file mode 100755
index 0000000..6616d6f
--- a/dev/null
+++ b/acpihandler
@@ -0,0 +1,66 @@
+#!/bin/sh
+# Default acpi script that takes an entry for all actions
+
+# NOTE: This is a 2.6-centric script. If you use 2.4.x, you'll have to
+# modify it to not use /sys
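+# acpid hands the event fields to this handler as positional parameters, roughly
+# of the form "<group/action> <device> <id> <value>" (for example something like
+# "ac_adapter AC 00000080 00000001"), which is why the cases below key on $1, $2 and $4.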
+
+minspeed=`cat /sys/devices/system/cpu/cpufreq/ondemand/cpuinfo_min_freq`
+maxspeed=`cat /sys/devices/system/cpu/cpufreq/ondemand/cpuinfo_max_freq`
+setspeed="/sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed"
+
+set $*
+echo "[email protected]" >> /home/raghavendra/logs/acpid.log
+
+case "$1" in
+ button/power)
+ #echo "PowerButton pressed!">/dev/tty5
+ case "$2" in
+ PWRF) logger "PowerButton pressed: $2" ;;
+ *) logger "ACPI action undefined: $2" ;;
+ esac
+ ;;
+ button/sleep)
+ case "$2" in
+ SLPB) echo -n mem >/sys/power/state ;;
+ *) logger "ACPI action undefined: $2" ;;
+ esac
+ ;;
+ ac_adapter)
+ case "$2" in
+ AC)
+ case "$4" in
+ 00000000)
+ echo -n $minspeed >$setspeed
+ #/etc/laptop-mode/laptop-mode start
+ ;;
+ 00000001)
+ echo -n $maxspeed >$setspeed
+ #/etc/laptop-mode/laptop-mode stop
+ ;;
+ esac
+ ;;
+ *) logger "ACPI action undefined: $2" ;;
+ esac
+ ;;
+ battery)
+ case "$2" in
+ BAT0)
+ case "$4" in
+ 00000000) #echo "offline" >/dev/tty5
+ ;;
+ 00000001) #echo "online" >/dev/tty5
+ ;;
+ esac
+ ;;
+ CPU0)
+ ;;
+ *) logger "ACPI action undefined: $2" ;;
+ esac
+ ;;
+ button/lid)
+ logger -p user.info "Lid closed"
+ ;;
+ *)
+ logger -p user.err "ACPI group/action undefined: $1 / $2"
+ ;;
+esac
diff --git a/agvim b/agvim
new file mode 100755
index 0000000..e51b5dc
--- a/dev/null
+++ b/agvim
@@ -0,0 +1,2 @@
+#!/bin/zsh
+xterm -e /usr/bin/vim -f "$@"
diff --git a/alert b/alert
new file mode 100755
index 0000000..31a83fb
--- a/dev/null
+++ b/alert
@@ -0,0 +1,11 @@
+#!/bin/zsh
+needed=$(/bin/df -h | /bin/grep -E '(/home$|/$)')
+
+for line in ${(f)needed};do
+ free=$(awk '{ print $5 }' <<< $line)
+ if [[ ${free%\%} -gt 94 ]];then
+ echo -n -e " Alert: Disk Low"
+ break
+ fi
+done
+#echo -n -e ""
diff --git a/archievm b/archievm
new file mode 100755
index 0000000..4cabb0b
--- a/dev/null
+++ b/archievm
@@ -0,0 +1,83 @@
+#!/bin/zsh
+
+[[ ! $(whoami) == 'root' ]] && exit 1
+
+debugk=0
+nographic=1
+huge=0
+test=1
+runas="raghavendra"
+DIR="/home/raghavendra/Arch/qemu"
+bzImage="$DIR/kvmImage"
+FILE="/media/Inkq/Virt/Archie.img"
+
+
+typeset -A args
+args=(d debugk n nographic g huge I bzImage t test)
+
+while getopts ':gn:dI:th' opt;do
+ case $opt in
+ g|d|t)
+ eval $args[$opt]=1
+ ;;
+ n)
+ nographic=$OPTARG
+ ;;
+ I)
+ bzImage=$OPTARG
+ ;;
+ h)
+ print "$0 [-d] [-g] [-n=0|1] [-I=<image>] [-h]
+ d: debugk
+ g: huge
+ t: test
+ n: nographic (default=1)
+ I: Image (default=$DIR/kvmImage)"
+ exit 1
+ ;;
+ esac
+done
+
+shift $(( OPTIND-1 ))
+
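+# Example usage (illustrative): "archievm -d -n 0" boots the locally built
+# $DIR/kvmImage in an SDL window; with no flags the guest runs headless in
+# snapshot mode and its console is logged to $DIR/console.log.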
+[[ ! -d /mnt-qemu ]] && mkdir -p /mnt-qemu
+
+if [[ $debugk == 1 ]];then
+ external=" -kernel $bzImage -initrd /boot/kernel26.img -append root=/dev/vda3 "
+fi
+
+if [[ $huge == 1 ]];then
+ echo 296 >| /proc/sys/vm/nr_hugepages
+ #hugearg=" -mem-path /media/hugepages -mem-prealloc "
+ hugearg=" -mem-path /media/hugepages "
+fi
+
+if [[ $nographic == 1 ]];then
+ graph=" -daemonize -nographic -serial file:$DIR/console.log"
+else
+ graph=" -vga std -sdl -no-frame"
+fi
+
+if [[ $test == 1 ]];then
+ snap=" -snapshot -drive file=$FILE,if=virtio "
+else
+ snap=" -drive file=$FILE,if=virtio,cache=writeback,boot=on -boot order=dc "
+fi
+
+qemu-kvm -name Archie -enable-kvm -s -monitor unix:$DIR/archie.sock,server,nowait \
+ -m 512 $hugearg $=snap -smp 2,cores=2,maxcpus=4 \
+ -net nic,model=virtio,vlan=1 -net user,vlan=1,hostfwd=tcp:127.0.0.1:2222-:22 $=graph \
+ -virtfs local,path=$DIR/share,security_model=passthrough,mount_tag=share -balloon virtio \
+ -virtfs local,path=$DIR/module,security_model=passthrough,mount_tag=module \
+ -virtfs local,path=/var/cache/pacman/pkg,security_model=passthrough,mount_tag=pacman \
+ $=external -runas $runas $=EARGS \
+ || exit 1
+
+
+echo 1 >| /sys/kernel/mm/ksm/run
+echo "sudo -u $runas sshfs -p 2222 [email protected]:/ /mnt-qemu" | xclip -i
+rlwrap -pgreen socat - $DIR/archie.sock
+#qemu-kvm -name Archie -enable-kvm -m 512 -snapshot -drive file=/media/Inkq/Virt/Archie.img,if=virtio -vga std -kernel ~/Arch/qemu/kvmImage -initrd /boot/kernel26.img -append root=/dev/vda3
+#qemu-kvm -name Archie -enable-kvm -m 512 -snapshot -hda /media/Inkq/Virt/Archie.img -vga std -kernel ~/Arch/qemu/kvmImage -initrd /boot/kernel26-wye.img -append "root=/dev/sda3 console=ttyS0 earlyprintk=serial,ttyS0" -serial file:/tmp/x.log#
+
diff --git a/aria b/aria
new file mode 100755
index 0000000..e4793c7
--- a/dev/null
+++ b/aria
@@ -0,0 +1,7 @@
+#!/bin/zsh
+if ! ~/bin/downloadStats 1;then
+ shellrun download "$@"
+else
+ echo "shellrun download $@" >> ~/.dque
+fi
+
diff --git a/aria2mon b/aria2mon
new file mode 100755
index 0000000..9082e96
--- a/dev/null
+++ b/aria2mon
@@ -0,0 +1,146 @@
+#!/usr/bin/env ruby
+# The MIT License
+#
+# Copyright (c) 2009 Tatsuhiro Tsujikawa
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+require 'xmlrpc/client'
+require 'optparse'
+
+program_name=File.basename($0)
+options={}
+args=nil
+OptionParser.new do |opt|
+ opt.on("--server SERVER", "hostname of XML-RPC server. Default: localhost"){|val| options["server"]=val }
+ opt.on("--port PORT", "port of XML-RPC server. Default: 6800"){|val| options["port"]=val }
+
+ opt.on("--user USERNAME", "XML-RPC username"){|val| options["user"]=val }
+ opt.on("--passwd PASSWORD", "XML-RPC password"){|val| options["passwd"]=val }
+
+ opt.banner=<<EOS
+Usage: #{program_name} [options]
+EOS
+
+ args=opt.parse(ARGV)
+end
+
+def compute_eta speed,rem_length
+ return "n/a" if speed == 0
+ remsec=rem_length/speed
+ hr=remsec/3600
+ remsec=remsec%3600
+ min=remsec/60
+ remsec=remsec%60
+ result=""
+ result += "#{hr}h" if hr > 0
+ result += "#{min}m" if min > 0
+ result += "#{remsec}s"
+end
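+# Rough worked example (figures are illustrative): with 10 MiB still to fetch at
+# 1024 B/s, compute_eta(1024, 10*1024*1024) turns 10240 remaining seconds into "2h50m40s".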
+
+def abbrev value
+ n=value/1024.0
+ if n < 1 then
+ return "#{value}"
+ end
+ value=n
+ n=value/1024.0
+ if n < 1 then
+ return sprintf("%.1fKi", value)
+ else
+ return sprintf("%.1fMi", n)
+ end
+end
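+# For instance: abbrev(512) => "512", abbrev(1536) => "1.5Ki",
+# abbrev(5*1024*1024) => "5.0Mi" (illustrative values).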
+
+auth=""
+if options.has_key?("user") then
+ auth=options["user"]+":"+options["passwd"]+"@"
+end
+if not options.has_key?("server") then
+ options["server"]="localhost"
+end
+if not options.has_key?("port") then
+ options["port"]="6800"
+end
+
+client=XMLRPC::Client.new3({:host => options["server"],
+ :port => options["port"],
+ :path => "/rpc",
+ :user => options["user"],
+ :password => options["passwd"]})
+
+options.delete("server")
+options.delete("port")
+options.delete("user")
+options.delete("passwd")
+
+result=client.call("aria2.tellActive")
+
+print "-- Download Progress --\n"
+result.each { |entry|
+ gid=entry['gid']
+ total_length=entry['totalLength'].to_i
+ completed_length=entry['completedLength'].to_i
+ upload_length=entry['uploadLength'].to_i
+ download_speed=entry['downloadSpeed'].to_i
+ print "GID##{gid}"
+ if total_length == completed_length then
+ if entry.key? 'infoHash' then
+ # for BitTorrent print seed status
+ print " SEEDING"
+ if completed_length > 0 then
+ print "(#{upload_length*100/completed_length}%)"
+ end
+ end
+ else
+ print " SIZE:#{abbrev completed_length}B/#{abbrev total_length}B"
+ if total_length > 0 then
+ print "(#{completed_length*100/total_length}%)"
+ end
+ end
+ print " CN:#{entry['connections']}"
+ if entry.key? 'numSeeders' then
+ print " SEED:#{entry['numSeeders']}"
+ end
+ print " SPD:#{abbrev download_speed}B/s"
+ if entry.key? 'infoHash'
+ printf " UP:#{abbrev entry['uploadSpeed'].to_i}B/s(#{abbrev upload_length}B)"
+ end
+ print " ETA:#{compute_eta(download_speed, total_length-completed_length)}"
+ print "\n"
+
+ if entry.key? 'infoHash'
+ print " InfoHash:#{entry['infoHash']}"
+ end
+ print "\n"
+
+ files=client.call("aria2.getFiles",entry['gid'])
+ if files.length > 0 then
+ first_file=files.find{|file| file["selected"]=="true"}
+ if first_file != nil then
+ print " File:#{first_file['path']}"
+ count=0
+ files.each {|file| count += 1 if file["selected"]=="true"}
+ if count > 1 then
+ print "(#{count-1}more)"
+ end
+ print "\n"
+ end
+ end
+ print "--------------------------------------------------------------------------------\n"
+}
diff --git a/aria2rpc b/aria2rpc
new file mode 100755
index 0000000..d171b2d
--- a/dev/null
+++ b/aria2rpc
@@ -0,0 +1,361 @@
+#!/usr/bin/env ruby
+# The MIT License
+#
+# Copyright (c) 2009 Tatsuhiro Tsujikawa
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+require 'xmlrpc/client'
+require 'pp'
+require 'optparse'
+
+program_name=File.basename($0)
+options={}
+args=nil
+OptionParser.new do |opt|
+ opt.on("-d","--dir DIR"){|val| options["dir"]=val}
+ opt.on("-V","--check-integrity [BOOL]", ["true","false"]){|val|
+ options["check-integrity"]= val||"true"
+ }
+ opt.on("-c","--continue [BOOL]",["true","false"]){|val|
+ options["continue"]=val||"true"
+ }
+ opt.on("--all-proxy PROXY"){|val| options["all-proxy"]=val}
+ opt.on("--all-proxy-user USER"){|val| options["all-proxy-user"]=val}
+ opt.on("--all-proxy-passwd PASSWD"){|val| options["all-proxy-passwd"]=val}
+ opt.on("--connect-timeout SEC"){|val| options["connect-timeout"]=val}
+ opt.on("--dry-run [BOOL]",["true","false"]){|val|
+ options["dry-run"]=val||"true"
+ }
+ opt.on("--lowest-speed-limit SPEED"){|val| options["lowest-speed-limit"]=val}
+ opt.on("--max-file-not-found NUM"){|val| options["max-file-not-found"]=val}
+ opt.on("-m","--max-tries N"){|val| options["max-tries"]=val}
+ opt.on("--no-proxy DOMAINS"){|val| options["no-proxy"]=val}
+ opt.on("-o","--out FILE"){|val| options["out"]=val}
+ opt.on("--proxy-method METHOD"){|val| options["proxy-method"]=val}
+ opt.on("-R","--remote-time [BOOL]",["true","false"]){|val|
+ options["remote-time"]=val||"true"
+ }
+ opt.on("-s","--split N"){|val| options["split"]=val}
+ opt.on("-t","--timeout SEC"){|val| options["timeout"]=val}
+ opt.on("--http-auth-challenge [BOOL]",["true","false"]){|val|
+ options["http-auth-challenge"]=val||"true"
+ }
+ opt.on("--http-no-cache [BOOL]",["true","false"]){|val|
+ options["http-no-cache"]=val||"true"
+ }
+ opt.on("--http-user USER"){|val| options["http-user"]=val}
+ opt.on("--http-passwd PASSWD"){|val| options["http-passwd"]=val}
+ opt.on("--http-proxy PROXY"){|val| options["http-proxy"]=val}
+ opt.on("--http-proxy-user USER"){|val| options["http-proxy-user"]=val}
+ opt.on("--http-proxy-passwd PASSWD"){|val| options["http-proxy-passwd"]=val}
+ opt.on("--https-proxy PROXY"){|val| options["https-proxy"]=val}
+ opt.on("--https-proxy-user USER"){|val| options["https-proxy-user"]=val}
+ opt.on("--https-proxy-passwd PASSWD"){|val| options["https-proxy-passwd"]=val}
+ opt.on("--referer REFERER"){|val| options["referer"]=val}
+ opt.on("--enable-http-keep-alive [BOOL]",["true","false"]){|val|
+ options["enable-http-keep-alive"]=val||"true"
+ }
+ opt.on("--enable-http-pipelining [BOOL]",["true","false"]){|val|
+ options["enable-http-pipelining"]=val||"true"
+ }
+ opt.on("--header HEADER"){|val|
+ options["header"] = [] if options["header"] == nil
+ options["header"] << val
+ }
+ opt.on("--use-head [BOOL]",["true","false"]){|val|
+ options["use-head"]=val||"true"
+ }
+ opt.on("-U","--user-agent USERAGENT"){|val| options["user-agent"]=val}
+ opt.on("--ftp-user USER"){|val| options["ftp-user"]=val}
+ opt.on("--ftp-passwd PASSWD"){|val| options["ftp-passwd"]=val}
+ opt.on("-p","--ftp-pasv [BOOL]",["true","false"]){|val|
+ options["ftp-pasv"]=val||"true"
+ }
+ opt.on("--ftp-proxy PROXY"){|val| options["ftp-proxy"]=val}
+ opt.on("--ftp-proxy-user USER"){|val| options["ftp-proxy-user"]=val}
+ opt.on("--ftp-proxy-passwd PASSWD"){|val| options["ftp-proxy-passwd"]=val}
+ opt.on("--ftp-type TYPE"){|val| options["ftp-type"]=val}
+ opt.on("--ftp-reuse-connection [BOOL]",["true","false"]){|val|
+ options["ftp-reuse-connection"]=val||"true"
+ }
+ opt.on("-n","--no-netrc [BOOL]",["true","false"]){|val|
+ options["no-netrc"]=val||"true"
+ }
+ opt.on("--reuse-uri [BOOL]",["true","false"]){|val|
+ options["reuse-uri"]=val||"true"
+ }
+ opt.on("--select-file INDEXES"){|val| options["select-file"]=val}
+ opt.on("--bt-enable-lpd [BOOL]",["true","false"]){|val|
+ options["bt-enable-lpd"]=val||"true"
+ }
+ opt.on("--bt-external-ip IPADDRESS"){|val| options["bt-external-ip"]=val}
+ opt.on("--bt-hash-check-seed [BOOL]",["true","false"]){|val|
+ options["bt-hash-check-seed"]=val||"true"
+ }
+ opt.on("--bt-max-open-files NUM"){|val| options["bt-max-open-files"]=val}
+ opt.on("--bt-max-peers NUM"){|val| options["bt-max-peers"]=val}
+ opt.on("--bt-metadata-only [BOOL]",["true","false"]){|val|
+ options["bt-metadata-only"]=val||"true"
+ }
+ opt.on("--bt-min-crypto-level LEVEL",["plain","arc4"]){|val|
+ options["bt-min-crypto-level"]=val
+ }
+ opt.on("--bt-prioritize-piece RANGE") {|val|
+ options["bt-prioritize-piece"]=val
+ }
+ opt.on("--bt-require-crypto BOOL",["true","false"]){|val|
+ options["bt-require-crypto"]=val
+ }
+ opt.on("--bt-request-peer-speed-limit SPEED"){|val|
+ options["bt-request-peer-speed-limit"]=val
+ }
+ opt.on("--bt-save-metadata [BOOL]",["true","false"]){|val|
+ options["bt-save-metadata"]=val||"true"
+ }
+ opt.on("--bt-seed-unverified [BOOL]",["true","false"]){|val|
+ options["bt-seed-unverified"]=val||"true"
+ }
+ opt.on("--bt-stop-timeout SEC"){|val| options["bt-stop-timeout"]=val}
+ opt.on("--bt-tracker-interval SEC"){|val| options["bt-tracker-interval"]=val}
+ opt.on("--bt-tracker-timeout SEC"){|val| options["bt-tracker-timeout"]=val}
+ opt.on("--bt-tracker-connect-timeout SEC"){|val|
+ options["bt-tracker-connect-timeout"]=val
+ }
+ opt.on("--enable-peer-exchange [BOOL]",["true","false"]){|val|
+ options["enable-peer-exchange"]=val||"true"
+ }
+ opt.on("--follow-torrent VALUE", ["true","false","mem"]){|val|
+ options["follow-torrent"]=val
+ }
+ opt.on("-O","--index-out INDEXPATH"){|val|
+ options["index-out"]=[] if options["index-out"] == nil
+ options["index-out"] << val
+ }
+ opt.on("-u","--max-upload-limit SPEED"){|val| options["max-upload-limit"]=val}
+ opt.on("--seed-ratio RATIO"){|val| options["seed-ratio"]=val}
+ opt.on("--seed-time MINUTES"){|val| options["seed-time"]=val}
+ opt.on("--follow-metalink VALUE", ["true","false","mem"]){|val|
+ options["follow-metalink"]=val
+ }
+ opt.on("-C","--metalink-servers NUM"){|val| options["metalink-servers"]=val}
+ opt.on("--metalink-language LANG"){|val| options["metalink-language"]=val}
+ opt.on("--metalink-location LOCS"){|val| options["metalink-location"]=val}
+ opt.on("--metalink-os OS"){|val| options["metalink-os"]=val}
+ opt.on("--metalink-version VERSION"){|val| options["metalink-version"]=val}
+ opt.on("--metalink-preferred-protocol PROTO"){|val|
+ options["metalink-preferred-protocol"]=val
+ }
+ opt.on("--metalink-enable-unique-protocol BOOL",["true","false"]){|val|
+ options["metalink-enable-unique-protocol"]=val
+ }
+ opt.on("--allow-overwrite BOOL",["true","false"]){|val|
+ options["allow-overwrite"]=val
+ }
+ opt.on("--allow-piece-length-change BOOL",["true","false"]){|val|
+ options["allow-piece-length-change"]=val
+ }
+ opt.on("--async-dns [BOOL]",["true","false"]){|val|
+ options["async-dns"]=val||"true"
+ }
+ opt.on("--auto-file-renaming [BOOL]",["true","false"]){|val|
+ options["auto-file-renaming"]=val||"true"
+ }
+ opt.on("--file-allocation METHOD",["none","prealloc","falloc"]){|val|
+ options["file-allocation"]=val
+ }
+ opt.on("--max-download-limit LIMIT"){|val| options["max-download-limit"]=val}
+ opt.on("--no-file-allocation-limit SIZE"){|val|
+ options["no-file-allocation-limit"]=val
+ }
+ opt.on("-P","--parameterized-uri [BOOL]",["true","false"]){|val|
+ options["parameterized-uri"]=val||"true"
+ }
+ opt.on("--realtime-chunk-checksum BOOL",["true","false"]){|val|
+ options["realtime-chunk-checksum"]=val
+ }
+ opt.on("--remove-control-file [BOOL]",["true","false"]){|val|
+ options["remove-control-file"]=val||"true"
+ }
+ opt.on("--always-resume [BOOL]",["true","false"]){|val|
+ options["always-resume"]=val||"true"
+ }
+ opt.on("--max-resume-failure-tries N"){|val|
+ options["max-resume-failure-tries"]=val
+ }
+ opt.on("--http-accept-gzip [BOOL]",["true","false"]){|val|
+ options["http-accept-gzip"]=val||"true"
+ }
+ opt.on("--max-connection-per-server NUM"){|val| options["max-connection-per-server"]=val}
+ opt.on("--min-split-size SIZE"){|val| options["min-split-size"]=val}
+ opt.on("--conditional-get [BOOL]",["true","false"]){|val|
+ options["conditional-get"]=val||"true"
+ }
+ opt.on("--enable-async-dns6 [BOOL]",["true","false"]){|val|
+ options["enable-async-dns6"]=val||"true"
+ }
+ opt.on("--bt-tracker URIS"){|val| options["bt-tracker"]=val}
+ opt.on("--bt-exclude-tracker URIS"){|val| options["bt-exclude-tracker"]=val}
+ opt.on("--retry-wait SEC"){|val| options["retry-wait"]=val}
+
+ opt.on("--max-overall-download-limit LIMIT"){|val| options["max-overall-download-limit"]=val}
+ opt.on("--max-overall-upload-limit LIMIT"){|val| options["max-overall-upload-limit"]=val}
+ opt.on("-j","--max-concurrent-downloads N"){|val| options["max-concurrent-downloads"]=val}
+
+ opt.on("--server SERVER", "hostname of XML-RPC server. Default: localhost"){|val| options["server"]=val }
+ opt.on("--port PORT", "port of XML-RPC server. Default: 6800"){|val| options["port"]=val }
+
+ opt.on("--user USERNAME", "XML-RPC username"){|val| options["user"]=val }
+ opt.on("--passwd PASSWORD", "XML-RPC password"){|val| options["passwd"]=val }
+
+ opt.banner=<<EOS
+Usage: #{program_name} addUri URI... [options]
+ #{program_name} addTorrent /path/to/torrent_file URI... [options]
+ #{program_name} addMetalink /path/to/metalink_file [options]
+ #{program_name} remove GID [options]
+ #{program_name} pause GID [options]
+ #{program_name} pauseAll [options]
+ #{program_name} forcePause GID [options]
+ #{program_name} forcePauseAll [options]
+ #{program_name} unpause GID [options]
+ #{program_name} unpauseAll [options]
+ #{program_name} changePosition GID pos how [options]
+ #{program_name} tellStatus GID [keys] [options]
+ #{program_name} tellActive [keys] [options]
+ #{program_name} tellWaiting offset num [keys] [options]
+ #{program_name} tellStopped offset num [keys] [options]
+ #{program_name} getOption GID [options]
+ #{program_name} getGlobalOption [options]
+ #{program_name} getFiles GID [options]
+ #{program_name} getUris GID [options]
+ #{program_name} getPeers GID [options]
+ #{program_name} purgeDownloadResult [options]
+ #{program_name} removeDownloadResult GID [options]
+ #{program_name} changeOption GID [options]
+ #{program_name} changeGlobalOption [options]
+ #{program_name} getVersion [options]
+ #{program_name} getSessionInfo [options]
+ #{program_name} shutdown [options]
+ #{program_name} forceShutdown [options]
+Options:
+EOS
+
+
+ args=opt.parse(ARGV)
+
+end
+
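+# Typical calls (assuming an aria2c daemon with its XML-RPC interface enabled is
+# listening on localhost:6800; the URI and GID below are placeholders):
+#   aria2rpc addUri http://example.org/file.iso --dir /tmp
+#   aria2rpc tellActive
+#   aria2rpc pauseAll --server otherhost --port 6801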
+if !args or args.size == 0 then
+ puts "No command specified"
+ exit 1
+end
+
+command=args[0]
+resources=args[1..-1]
+
+auth=""
+if options.has_key?("user") then
+ auth=options["user"]+":"+options["passwd"]+"@"
+end
+if not options.has_key?("server") then
+ options["server"]="localhost"
+end
+if not options.has_key?("port") then
+ options["port"]="6800"
+end
+
+client=XMLRPC::Client.new3({:host => options["server"],
+ :port => options["port"],
+ :path => "/rpc",
+ :user => options["user"],
+ :password => options["passwd"]})
+
+options.delete("server")
+options.delete("port")
+options.delete("user")
+options.delete("passwd")
+
+if command == "addUri" then
+ result=client.call("aria2."+command, resources, options)
+elsif command == "addTorrent" then
+ torrentData=IO.read(resources[0])
+ result=client.call("aria2."+command,
+ XMLRPC::Base64.new(torrentData), resources[1..-1], options)
+elsif command == "addMetalink" then
+ metalinkData=IO.read(resources[0])
+ result=client.call("aria2."+command,
+ XMLRPC::Base64.new(metalinkData), options)
+elsif command == "tellStatus" then
+ result=client.call("aria2."+command, resources[0], resources[1..-1])
+elsif command == "tellActive" then
+ result=client.call("aria2."+command, resources[0..-1])
+elsif command == "tellWaiting" then
+ result=client.call("aria2."+command, resources[0].to_i(), resources[1].to_i(),
+ resources[2..-1])
+elsif command == "tellStopped" then
+ result=client.call("aria2."+command, resources[0].to_i(), resources[1].to_i(),
+ resources[2..-1])
+elsif command == "getOption" then
+ result=client.call("aria2."+command, resources[0])
+elsif command == "getGlobalOption" then
+ result=client.call("aria2."+command)
+elsif command == "pause" then
+ result=client.call("aria2."+command, resources[0])
+elsif command == "pauseAll" then
+ result=client.call("aria2."+command)
+elsif command == "forcePause" then
+ result=client.call("aria2."+command, resources[0])
+elsif command == "forcePauseAll" then
+ result=client.call("aria2."+command)
+elsif command == "unpause" then
+ result=client.call("aria2."+command, resources[0])
+elsif command == "unpauseAll" then
+ result=client.call("aria2."+command)
+elsif command == "remove" then
+ result=client.call("aria2."+command, resources[0])
+elsif command == "changePosition" then
+ result=client.call("aria2."+command, resources[0], resources[1].to_i(),
+ resources[2])
+elsif command == "getFiles" then
+ result=client.call("aria2."+command, resources[0])
+elsif command == "getUris" then
+ result=client.call("aria2."+command, resources[0])
+elsif command == "getPeers" then
+ result=client.call("aria2."+command, resources[0])
+elsif command == "purgeDownloadResult" then
+ result=client.call("aria2."+command)
+elsif command == "removeDownloadResult" then
+ result=client.call("aria2."+command, resources[0])
+elsif command == "changeOption" then
+ result=client.call("aria2."+command, resources[0], options)
+elsif command == "changeGlobalOption" then
+ result=client.call("aria2."+command, options)
+elsif command == "getVersion" then
+ result=client.call("aria2."+command)
+elsif command == "getSessionInfo" then
+ result=client.call("aria2."+command)
+elsif command == "shutdown" then
+ result=client.call("aria2."+command)
+elsif command == "forceShutdown" then
+ result=client.call("aria2."+command)
+else
+ puts "Command not recognized"
+ exit 1
+end
+
+pp result
diff --git a/autorss.py b/autorss.py
new file mode 100755
index 0000000..b0a2754
--- a/dev/null
+++ b/autorss.py
@@ -0,0 +1,60 @@
+#!/usr/bin/python2
+"""Find RSS feed from site's LINK tag"""
+
+__author__ = "Mark Pilgrim ([email protected])"
+__copyright__ = "Copyright 2002, Mark Pilgrim"
+__license__ = "Python"
+
+try:
+ import timeoutsocket # http://www.timo-tasi.org/python/timeoutsocket.py
+ timeoutsocket.setDefaultSocketTimeout(10)
+except ImportError:
+ pass
+import urllib, urlparse
+from sgmllib import SGMLParser
+
+BUFFERSIZE = 1024
+
+class LinkParser(SGMLParser):
+ def reset(self):
+ SGMLParser.reset(self)
+ self.href = ''
+
+ def do_link(self, attrs):
+ if not ('rel', 'alternate') in attrs: return
+ if not ('type', 'application/rss+xml') in attrs: return
+ hreflist = [e[1] for e in attrs if e[0]=='href']
+ if hreflist:
+ self.href = hreflist[0]
+ self.setnomoretags()
+
+ def end_head(self, attrs):
+ self.setnomoretags()
+ start_body = end_head
+
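+# The parser above matches a header tag of the form
+#   <link rel="alternate" type="application/rss+xml" href="http://example.org/feed.xml">
+# and records its href; the two helpers below drive it from raw HTML or from a URL.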
+def getRSSLinkFromHTMLSource(htmlSource):
+ try:
+ parser = LinkParser()
+ parser.feed(htmlSource)
+ return parser.href
+ except:
+ return ''
+
+def getRSSLink(url):
+ try:
+ usock = urllib.urlopen(url)
+ parser = LinkParser()
+ while 1:
+ buffer = usock.read(BUFFERSIZE)
+ parser.feed(buffer)
+ if parser.nomoretags: break
+ if len(buffer) < BUFFERSIZE: break
+ usock.close()
+ return urlparse.urljoin(url, parser.href)
+ except:
+ return ''
+
+if __name__ == '__main__':
+ import sys
+ print getRSSLink(sys.argv[1])
+
diff --git a/battery_mon b/battery_mon
new file mode 100755
index 0000000..5591c5c
--- a/dev/null
+++ b/battery_mon
@@ -0,0 +1,5 @@
+#!/bin/bash
+status=`acpi -b`
+if grep 'Discharg' <<< $status;then
+echo "On Battery: $(cut -d ':' -f 2 <<< $status) |"
+fi
diff --git a/bldins b/bldins
new file mode 100755
index 0000000..4cb846d
--- a/dev/null
+++ b/bldins
@@ -0,0 +1,176 @@
+#!/bin/zsh
+
+pread (){
+ [[ $debug == 0 ]] && {
+ echo "$@" # print the message passed by the caller
+ echo -n ">_ "
+ read
+ }
+ }
+unset CFLAGS CXXFLAGS LDFLAGS CC
+
+pushd $HOME/Arch/Build/torvalds-git
+
+install=${INSTALL:-0}
+
+
+debug=${DEBUG:-0}
+export KBUILD_VERBOSE=${VERBOSE:-0}
+clean=${CLEAN:-1}
+MAKEFLAGS=${MAKEFLAGS:--j8}
+ccache=${CCACHE:-0}
+export ARCH=x86_64
+export LOCALVERSION=
+export INSTALL_FW_PATH=/lib/firmware
+if [[ $ccache == 1 ]];then
+ export PATH="/usr/lib/ccache/bin/:$PATH"
+ clean=0
+fi
+export BUILDCMD=" make $MAKEFLAGS bzImage modules "
+export MENUCONFIG_COLOR=blackbg
+export KRCH="x86"
+LOGFILE="$HOME/logs/kbuild/kbuild-$(date +%s)"
+export KCFLAGS=" -march=native -O2 -pipe "
+#export LDFLAGS_MODULE="-s"
+#export KBUILD_LDFLAGS_MODULE="-s"
+export INSTALL_MOD_STRIP="-s"
+
+if [[ $install == 0 ]];then
+
+ [[ $clean == 0 ]] && make clean
+
+ if [[ $debug == 1 ]];then
+
+ #export BUILDCMD=" make $MAKEFLAGS bzImage "
+ #cp ~/config/kconfig/config.debug .config
+ KCFLAGS=" -mtune=generic -O -pipe -ggdb3 "
+ BUILDCMD=" make $MAKEFLAGS bzImage "
+ #LDFLAGS_MODULE=""
+ unset INSTALL_MOD_STRIP
+ make localyesconfig || exit 1
+ else
+ #cp ~/config/kconfig/config .config
+ KCFLAGS=" -march=native -O2 -pipe "
+ make menuconfig || exit 1
+ make modules_prepare || exit 1
+
+ fi
+
+ kernver="$(make kernelrelease)"
+ echo "Kernel version $kernver"
+ echo "Building bzImage!"
+
+ #/usr/bin/time -p $=BUILDCMD | tee $LOGFILE || exit 1
+ /usr/bin/time -p $=BUILDCMD | tee $LOGFILE || exit 1
+
+ ver=${kernver[(ws:-:)-1]}
+ kernelname=${1:-$ver}
+
+ pread $kernelname $kernver
+else
+ kernver="$(make kernelrelease)"
+ ver=${kernver[(ws:-:)-1]}
+ kernelname=${1:-$ver}
+fi
+
+kernelname=$kernelname:l
+
+if [[ $debug == 0 ]];then
+ sudo cp -v System.map /boot/System.map26${kernelname}
+ sudo cp -v arch/x86/boot/bzImage /boot/vmlinuz26${kernelname} || exit 1
+ sudo install -m644 -D vmlinux /usr/src/linux-${kernver}/vmlinux || exit 1
+else
+ cp -v System.map ~/Arch/qemu/System.map26
+ cp arch/x86/boot/bzImage ~/Arch/qemu/kvmImage || exit 1
+ cp vmlinux ~/Arch/qemu/vmlinux || exit 1
+ #make modules_install INSTALL_MOD_PATH=~/Arch/qemu/modules || exit 1
+ #make modules_install MODLIB=~/Arch/qemu/module || exit 1
+ #make headers_install INSTALL_HDR_PATH=~/Arch/qemu/header || exit 1
+ exit
+fi
+
+echo "Generating mkinitcpio stuff"
+/bin/echo "ALL_kver=$kernver" | sudo tee /etc/mkinitcpio.d/kernel26${kernelname}.kver
+
+/bin/echo "
+# mkinitcpio preset file for $kernelname
+
+########################################/
+# DO NOT EDIT THIS LINE:
+source /etc/mkinitcpio.d/kernel26${kernelname}.kver
+########################################
+ALL_config=\"/etc/mkinitcpio.conf\"
+
+PRESETS=('default' 'fallback')
+
+default_image=\"/boot/kernel26-${kernelname}.img\"
+
+fallback_image=\"/boot/kernel26-fallback${kernelname}.img\"
+fallback_options=\"-S autodetect\"
+" | sudo tee /etc/mkinitcpio.d/kernel26${kernelname}.preset
+
+pread "Verify mkinitcpio.conf and others"
+pread "Installing modules to /lib/modules/$kernver"
+
+sudo make modules_install
+
+pread "Installing firmware"
+sudo make firmware_install
+
+pread "Time for headers..hmmm"
+
+sudo install -D -m644 Makefile /usr/src/linux-${kernver}/Makefile
+sudo install -D -m644 kernel/Makefile /usr/src/linux-${kernver}/kernel/Makefile
+sudo install -D -m644 .config /usr/src/linux-${kernver}/.config
+sudo mkdir -p /usr/src/linux-${kernver}/include
+
+ for i in acpi asm-generic config generated linux math-emu media net pcmcia scsi sound trace video xen; do
+ sudo cp -a include/$i /usr/src/linux-${kernver}/include/
+ done
+
+ sudo mkdir -p /usr/src/linux-${kernver}/arch/x86
+ sudo cp -a arch/x86/include /usr/src/linux-${kernver}/arch/x86/
+
+ sudo cp Module.symvers /usr/src/linux-${kernver}
+ sudo cp -a scripts /usr/src/linux-${kernver}
+ sudo chmod og-w -R /usr/src/linux-${kernver}/scripts
+ sudo mkdir -p /usr/src/linux-${kernver}/.tmp_versions
+
+ sudo mkdir -p /usr/src/linux-${kernver}/arch/$KRCH/kernel
+
+ sudo cp arch/$KRCH/Makefile /usr/src/linux-${kernver}/arch/$KRCH/
+ sudo cp arch/$KRCH/kernel/asm-offsets.s /usr/src/linux-${kernver}/arch/$KRCH/kernel/
+
+
+ sudo install -D -m644 Documentation/DocBook/Makefile \
+ /usr/src/linux-${kernver}/Documentation/DocBook/Makefile
+ sudo mkdir -p /usr/src/linux-${kernver}/include/linux
+ sudo cp include/linux/inotify.h /usr/src/linux-${kernver}/include/linux/
+ sudo mkdir -p /usr/src/linux-${kernver}/fs/xfs
+ sudo mkdir -p /usr/src/linux-${kernver}/mm
+ sudo cp fs/xfs/xfs_sb.h /usr/src/linux-${kernver}/fs/xfs/xfs_sb.h
+ sudo cp -a include/drm /usr/src/linux-${kernver}/include/
+ sudo cp -a include/trace /usr/src/linux-${kernver}/include/
+ for i in `find . -name "Kconfig*"`; do
+ sudo mkdir -p /usr/src/linux-${kernver}/`echo $i | sed 's|/Kconfig.*||'`
+ sudo cp $i /usr/src/linux-${kernver}/$i
+ done
+
+#sudo make headers_install INSTALL_HDR_PATH=/usr/src/linux-${kernver}/
+
+ # copy files necessary for later builds, like nvidia and vmware
+
+sudo chown -R root.root /usr/src/linux-${kernver}
+sudo find /usr/src/linux-${kernver} -type d -exec chmod 755 {} \;
+
+echo "Installing the initramfs image"
+sudo mkinitcpio -p kernel26${kernelname}
+
+sudo depmod $kernver
+
+pushd /lib/modules/${kernver}
+sudo rm -f build
+sudo ln -sf /usr/src/linux-${kernver} build
+popd
+popd
diff --git a/blogit b/blogit
new file mode 100755
index 0000000..605b088
--- a/dev/null
+++ b/blogit
@@ -0,0 +1,13 @@
+#!/bin/zsh
+DIR="/media/sate/srv/http/lighty/blog/"
+pushd $HOME/.posts
+echo "Post name"
+read post
+if [[ -f $DIR/$post ]];then
+ /usr/bin/vim $DIR/$post
+else
+ /usr/bin/vim $post
+ cp $post $DIR/
+fi
+popd
+
diff --git a/bloker b/bloker
new file mode 100755
index 0000000..fa5ee87
--- a/dev/null
+++ b/bloker
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+set -x
+
+if [ "$1" = "rehash" ]
+then
+
+ mkdir -p /dev/shm/create_list
+ cd /dev/shm/create_list
+ #ALL ads-trackers-and-bad-pr0n dshield ipset_rules level2 spyware templist
+ #badpeers bogon hijacked level1(AVOID) Microsoft spider
+ LISTS="ads-trackers-and-bad-pr0n hijacked badpeers Microsoft bogon"
+ for list in $LISTS
+ do
+ wget http://www.bluetack.co.uk/config/$list.gz
+ done
+ #gunzip *.gz
+ #http://www.maeyanie.com/2008/12/efficient-iptables-peerguardian-blocklist/
+ #cat ads-trackers-and-bad-pr0n hijacked badpeers Microsoft bogon | pg2ipse - - ads1 > ADS
+ gunzip -c *.gz | pg2ipse - - ads1 > ADS
+ echo "Reading into ipset"
+ read
+ cat ADS | grep -v ":" | /bin/grep -E '([0-9\.]+|^COMMIT)' | sudo ipset -R
+
+ echo "Applying to iptables"
+ read
+ sudo iptables -A INPUT -m set --set ads1 src -j DROP
+ sudo iptables -A FORWARD -m set --set ads1 src -j DROP
+
+ sudo iptables -A FORWARD -m set --set ads1 dst -j REJECT
+ sudo iptables -A OUTPUT -m set --set ads1 dst -j REJECT
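+ # Tip (illustrative): "sudo ipset -L ads1" lists the populated set, and
+ # "sudo ipset -S" dumps it in the restore format consumed by the load branch below.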
+
+elif [ "$1" = "unload" ];then
+
+ sudo iptables -D INPUT -m set --set ads1 src -j DROP
+ sudo iptables -D FORWARD -m set --set ads1 src -j DROP
+ sudo iptables -D FORWARD -m set --set ads1 dst -j REJECT
+ sudo iptables -D OUTPUT -m set --set ads1 dst -j REJECT
+
+elif [ "$1" = "load" ];then
+
+ cat ~/config/BLOCK | sudo ipset -R
+ sleep 4
+ sudo iptables -A INPUT -m set --set ads1 src -j DROP
+ sudo iptables -A FORWARD -m set --set ads1 src -j DROP
+ sudo iptables -A FORWARD -m set --set ads1 dst -j REJECT
+ sudo iptables -A OUTPUT -m set --set ads1 dst -j REJECT
+
+fi
+set +x
diff --git a/booktwit b/booktwit
new file mode 100755
index 0000000..d88c6a4
--- a/dev/null
+++ b/booktwit
@@ -0,0 +1,46 @@
+#!/bin/zsh
+# vim: ft=sh et:
+user=emptyvacuum
+pass=$(cat $HOME/.booktwit)
+source bin/functions
+
+if [[ $1 == "search" ]];then
+ ~/bin/searchtwit.py "" $pass
+ exit
+elif [[ $1 == "all" || $1 == "list" ]];then
+ #curl --basic --user $user:$pass http://twitter.com/$user
+ #expr="~/bin/searchtwit.py all $pass | tr ',' '\n' | tr -d \' | perl -lne 'print $1 if /.*?u(.+)$/'"
+ url=$(~/bin/searchtwit.py all $pass | tr ',' '\n' | tr -d \' | perl -lne 'print $1 if /.*?u(.+)$/' | ${=DMENU} | cut -d " " -f 1)
+ [[ ! -z $url ]] && $BROWSER $url
+ exit
+
+elif [[ $1 == "list" ]];then
+ url=$( cat ~/.bookmarks | ${=DMENU} | cut -d "|" -f 1)
+ [[ ! -z $url ]] && $BROWSER $url
+ exit
+fi
+#set +x
+
+if [[ -z $1 ]];then
+ url=$(xsel -o)
+else
+ url=$1
+fi
+if ! isurl $url;then
+ exit 1
+fi
+
+#echo "Enter URL"
+#read url
+#echo "Description"
+#shift
+if [[ -z $2 ]];then
+ desc="$(zenity --title 'Booktwit' --entry --text 'Enter the description')"
+else
+ title="$2"
+ shift 2
+fi
+
+echo -e "\n${url}|$title|$desc" >>| ~/.bookmarks
+curl -s --basic --user $user:$pass --data status="$url $desc" http://twitter.com/statuses/update.xml
diff --git a/brightness b/brightness
new file mode 100755
index 0000000..859a28f
--- a/dev/null
+++ b/brightness
@@ -0,0 +1,16 @@
+#!/bin/zsh
+#device="/proc/acpi/video/PEGP/LCD/brightness"
+device="/sys/devices/virtual/backlight/acpi_video0/brightness"
+# 1 2 3
+#typeset -a avl
+#avl=($(head -1 $device | cut -d ' ' -f 2-))
+
+value=$(tail -1 $device )
+#index=$avl[(I)$value]
+
+
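+# Usage (e.g. from a hotkey binding): "brightness up" or "brightness down"
+# nudges the current backlight value by one step.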
+if [[ $1 = up ]];then value=$(( value+1 ));fi
+if [[ $1 = down ]];then value=$(( value-1 ));fi
+
+
+echo -n $value >| $device
diff --git a/browser b/browser
new file mode 100755
index 0000000..2364ba8
--- a/dev/null
+++ b/browser
@@ -0,0 +1,53 @@
+#!/bin/zsh
+# The main browser script
+URL="$1"
+if [[ $URL == (#i)(*.jpg|*.png|*.jpeg) ]];then
+ notify-send "Browser" "feh for $URL"
+ file=$URL:t
+ file="~/.local/share/feh/$file"
+ if [[ ! -f "$file" ]];then
+ feh -j ~/.local/share/feh/ -k "$URL"
+ else
+ feh $file
+ fi
+ exit
+fi
+
+if [[ $URL == *youtube.com/watch* ]];then
+ notify-send "Browser" "youtube for $URL"
+ #DFILE=
+ #trap 'rm $DFILE;kill $$' TERM
+ #trap '~/bin/mplayeraux stop' INT
+ tubeplay $URL
+ #sleep 2
+ #DFILE=$(ls -Acr |tail -1)
+ #export DFILE
+ exit
+fi
+
+if [[ $URL == (#i)(*.mp3|*.ogg) ]];then
+ notify-send "Browser" "mp3/mplayer for $URL"
+ #echo "$URL" | mpc add
+ localf="${URL:t}"
+ /usr/bin/wget -O - $URL | tee $HOME/media/online/$localf | /usr/bin/mplayer -cache 4096 -cache-min 40 "$URL"
+ exit
+fi
+
+
+OB="firefox"
+isrxvt=0
+
+if [[ $0 == *ubrowser ]];then
+ isrxvt=1
+fi
+
+#if [[ $URL == http://tinyurl* || $num1 == $num2 || ((! -t 1) && $isrxvt == 0) ]];then
+if [[ $URL == http://tinyurl* || $isrxvt == 0 ]];then
+ OB="$BROWSER"
+ $OB -remote "openurl($URL)"
+ #$OB "$URL"
+else
+ OB="$TBROWSER"
+ tmux new-window -a -d -t uake: "echo -e '\a'; $OB $URL"
+fi
+
diff --git a/buildkernel b/buildkernel
new file mode 100755
index 0000000..7e4b804
--- a/dev/null
+++ b/buildkernel
@@ -0,0 +1,35 @@
+#!/bin/bash
+set -x
+
+#export KCFLAGS="-march=k8 -O2 -pipe"
+
+ARCH=x86
+BUILD="/media/Sentinel/zen/zen-stable/"
+echo "Copy your config file to $BUILD as .config"
+read
+make prepare
+echo "Running menuconfig"
+echo "Skip menuconfig Y/n"
+read yesno
+[[ $yesno == "n" ]] && make menuconfig
+#echo "Running general config"
+#read
+#make config
+read
+kernver="$(make kernelrelease)"
+echo "Kernel version $kernver"
+read
+echo "Building bzImage!"
+read
+make -j3 bzImage modules || exit 1
+#echo "Building modules!"
+#read
+#make -j3 modules
+
+echo "Steps after this affects ur system"
+read
+echo "Sure?"
+read
+
+sudo ./installkernel "$kernver"
+set +x
diff --git a/catc b/catc
new file mode 100755
index 0000000..b13ba7e
--- a/dev/null
+++ b/catc
@@ -0,0 +1,24 @@
+#!/bin/zsh
+#set -x
+
+# Part of pipe
+if [[ ! -t 0 ]];then
+ exit
+fi
+
+# Source of Pipe
+if [[ ! -t 1 ]];then
+ #echo "Test"
+ exit
+fi
+
+columns=${$(stty size)[1]}
+lines=${$(wc -l $1)[1]}
+
+if [[ $columns -gt $lines ]];then
+ /bin/cat $@ # file fits on the screen: dump it as-is
+else
+ /bin/less $@
+fi
+#set +x
diff --git a/checksec b/checksec
new file mode 100755
index 0000000..000c060
--- a/dev/null
+++ b/checksec
@@ -0,0 +1,849 @@
+#!/bin/bash
+#
+# The BSD License (http://www.opensource.org/licenses/bsd-license.php)
+# specifies the terms and conditions of use for checksec.sh:
+#
+# Copyright (c) 2009-2011, Tobias Klein.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Tobias Klein nor the name of trapkit.de may be
+# used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.
+#
+# Name : checksec.sh
+# Version : 1.4
+# Author : Tobias Klein
+# Date : January 2011
+# Download: http://www.trapkit.de/tools/checksec.html
+# Changes : http://www.trapkit.de/tools/checksec_changes.txt
+#
+# Description:
+#
+# Modern Linux distributions offer some mitigation techniques to make it
+# harder to exploit software vulnerabilities reliably. Mitigations such
+# as RELRO, NoExecute (NX), Stack Canaries, Address Space Layout
+# Randomization (ASLR) and Position Independent Executables (PIE) have
+# made reliably exploiting any vulnerabilities that do exist far more
+# challenging. The checksec.sh script is designed to test what *standard*
+# Linux OS and PaX (http://pax.grsecurity.net/) security features are being
+# used.
+#
+# As of version 1.3 the script also lists the status of various Linux kernel
+# protection mechanisms.
+#
+# Credits:
+#
+# Thanks to Brad Spengler (grsecurity.net) for the PaX support.
+# Thanks to Jon Oberheide (jon.oberheide.org) for the kernel support.
+#
+# Others that contributed to checksec.sh (in no particular order):
+#
+# Simon Ruderich, Denis Scherbakov, Stefan Kuttler, Radoslaw Madej,
+# Anthony G. Basile.
+#
+
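+# Example invocations (the target path is illustrative, not part of the script):
+#   ./checksec --file /bin/ls     # RELRO / canary / NX / PIE report for one binary
+#   ./checksec --proc-all         # the same checks for every running process
+#   ./checksec --kernel           # kernel self-protection configuration
+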
+# global vars
+have_readelf=1
+verbose=false
+
+# FORTIFY_SOURCE vars
+FS_end=_chk
+FS_cnt_total=0
+FS_cnt_checked=0
+FS_cnt_unchecked=0
+FS_chk_func_libc=0
+FS_functions=0
+FS_libc=0
+
+# help
+if [ "$#" = "0" -o "$1" = "--help" ] ; then
+ echo "Usage: checksec [OPTION]"
+ echo
+ echo "Options:"
+ echo
+ echo " --file <executable-file>"
+ echo " --dir <directory> [-v]"
+ echo " --proc <process name>"
+ echo " --proc-all"
+ echo " --proc-libs <process ID>"
+ echo " --kernel"
+ echo " --fortify-file <executable-file>"
+ echo " --fortify-proc <process ID>"
+ echo " --version"
+ echo " --help"
+ echo
+ echo "For more information, see:"
+ echo " http://www.trapkit.de/tools/checksec.html"
+ echo
+ exit 1
+fi
+
+# version information
+version() {
+ echo "checksec v1.4, Tobias Klein, www.trapkit.de, January 2011"
+ echo
+}
+
+# check if command exists
+command_exists () {
+ type $1 > /dev/null 2>&1;
+}
+
+# check if directory exists
+dir_exists () {
+ if [ -d $1 ] ; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+# check user privileges
+root_privs () {
+ if [ $(/usr/bin/id -u) -eq 0 ] ; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+# check if input is numeric
+isNumeric () {
+ echo "[email protected]" | grep -q -v "[^0-9]"
+}
+
+# check if input is a string
+isString () {
+ echo "[email protected]" | grep -q -v "[^A-Za-z]"
+}
+
+# check file(s)
+filecheck() {
+ # check for RELRO support
+ if readelf -l $1 2>/dev/null | grep -q 'GNU_RELRO'; then
+ if readelf -d $1 2>/dev/null | grep -q 'BIND_NOW'; then
+ echo -n -e '\033[32mFull RELRO \033[m '
+ else
+ echo -n -e '\033[33mPartial RELRO\033[m '
+ fi
+ else
+ echo -n -e '\033[31mNo RELRO \033[m '
+ fi
+
+ # check for stack canary support
+ if readelf -s $1 2>/dev/null | grep -q '__stack_chk_fail'; then
+ echo -n -e '\033[32mCanary found \033[m '
+ else
+ echo -n -e '\033[31mNo canary found\033[m '
+ fi
+
+ # check for NX support
+ if readelf -W -l $1 2>/dev/null | grep 'GNU_STACK' | grep -q 'RWE'; then
+ echo -n -e '\033[31mNX disabled\033[m '
+ else
+ echo -n -e '\033[32mNX enabled \033[m '
+ fi
+
+ # check for PIE support
+ if readelf -h $1 2>/dev/null | grep -q 'Type:[[:space:]]*EXEC'; then
+ echo -n -e '\033[31mNo PIE \033[m '
+ elif readelf -h $1 2>/dev/null | grep -q 'Type:[[:space:]]*DYN'; then
+ if readelf -d $1 2>/dev/null | grep -q '(DEBUG)'; then
+ echo -n -e '\033[32mPIE enabled \033[m '
+ else
+ echo -n -e '\033[33mDynamic Shared Object\033[m '
+ fi
+ else
+ echo -n -e '\033[33mNot an ELF file \033[m '
+ fi
+}
+
+# check process(es)
+proccheck() {
+ # check for RELRO support
+ if readelf -l $1/exe 2>/dev/null | grep -q 'Program Headers'; then
+ if readelf -l $1/exe 2>/dev/null | grep -q 'GNU_RELRO'; then
+ if readelf -d $1/exe 2>/dev/null | grep -q 'BIND_NOW'; then
+ echo -n -e '\033[32mFull RELRO \033[m '
+ else
+ echo -n -e '\033[33mPartial RELRO \033[m '
+ fi
+ else
+ echo -n -e '\033[31mNo RELRO \033[m '
+ fi
+ else
+ echo -n -e '\033[31mPermission denied (please run as root)\033[m\n'
+ exit 1
+ fi
+
+ # check for stack canary support
+ if readelf -s $1/exe 2>/dev/null | grep -q 'Symbol table'; then
+ if readelf -s $1/exe 2>/dev/null | grep -q '__stack_chk_fail'; then
+ echo -n -e '\033[32mCanary found \033[m '
+ else
+ echo -n -e '\033[31mNo canary found \033[m '
+ fi
+ else
+ if [ "$1" != "1" ] ; then
+ echo -n -e '\033[33mPermission denied \033[m '
+ else
+ echo -n -e '\033[33mNo symbol table found\033[m '
+ fi
+ fi
+
+ # first check for PaX support
+ if cat $1/status 2> /dev/null | grep -q 'PaX:'; then
+ pageexec=( $(cat $1/status 2> /dev/null | grep 'PaX:' | cut -b6) )
+ segmexec=( $(cat $1/status 2> /dev/null | grep 'PaX:' | cut -b10) )
+ mprotect=( $(cat $1/status 2> /dev/null | grep 'PaX:' | cut -b8) )
+ randmmap=( $(cat $1/status 2> /dev/null | grep 'PaX:' | cut -b9) )
+ if [[ "$pageexec" = "P" || "$segmexec" = "S" ]] && [[ "$mprotect" = "M" && "$randmmap" = "R" ]] ; then
+ echo -n -e '\033[32mPaX enabled\033[m '
+ elif [[ "$pageexec" = "p" && "$segmexec" = "s" && "$randmmap" = "R" ]] ; then
+ echo -n -e '\033[33mPaX ASLR only\033[m '
+ elif [[ "$pageexec" = "P" || "$segmexec" = "S" ]] && [[ "$mprotect" = "m" && "$randmmap" = "R" ]] ; then
+ echo -n -e '\033[33mPaX mprot off \033[m'
+ elif [[ "$pageexec" = "P" || "$segmexec" = "S" ]] && [[ "$mprotect" = "M" && "$randmmap" = "r" ]] ; then
+ echo -n -e '\033[33mPaX ASLR off\033[m '
+ elif [[ "$pageexec" = "P" || "$segmexec" = "S" ]] && [[ "$mprotect" = "m" && "$randmmap" = "r" ]] ; then
+ echo -n -e '\033[33mPaX NX only\033[m '
+ else
+ echo -n -e '\033[31mPaX disabled\033[m '
+ fi
+ # fallback check for NX support
+ elif readelf -W -l $1/exe 2>/dev/null | grep 'GNU_STACK' | grep -q 'RWE'; then
+ echo -n -e '\033[31mNX disabled\033[m '
+ else
+ echo -n -e '\033[32mNX enabled \033[m '
+ fi
+
+ # check for PIE support
+ if readelf -h $1/exe 2>/dev/null | grep -q 'Type:[[:space:]]*EXEC'; then
+ echo -n -e '\033[31mNo PIE \033[m '
+ elif readelf -h $1/exe 2>/dev/null | grep -q 'Type:[[:space:]]*DYN'; then
+ if readelf -d $1/exe 2>/dev/null | grep -q '(DEBUG)'; then
+ echo -n -e '\033[32mPIE enabled \033[m '
+ else
+ echo -n -e '\033[33mDynamic Shared Object\033[m '
+ fi
+ else
+ echo -n -e '\033[33mNot an ELF file \033[m '
+ fi
+}
+
+# check mapped libraries
+libcheck() {
+ libs=( $(awk '{ print $6 }' /proc/$1/maps | grep '/' | sort -u | xargs file | grep ELF | awk '{ print $1 }' | sed 's/:/ /') )
+
+ printf "\n* Loaded libraries (file information, # of mapped files: ${#libs[@]}):\n\n"
+
+ for element in $(seq 0 $((${#libs[@]} - 1)))
+ do
+ echo " ${libs[$element]}:"
+ echo -n " "
+ filecheck ${libs[$element]}
+ printf "\n\n"
+ done
+}
+
+# check for system-wide ASLR support
+aslrcheck() {
+ # PaX ASLR support
+ if !(cat /proc/1/status 2> /dev/null | grep -q 'Name:') ; then
+ echo -n -e ':\033[33m insufficient privileges for PaX ASLR checks\033[m\n'
+ echo -n -e ' Fallback to standard Linux ASLR check'
+ fi
+
+ if cat /proc/1/status 2> /dev/null | grep -q 'PaX:'; then
+ printf ": "
+ if cat /proc/1/status 2> /dev/null | grep 'PaX:' | grep -q 'R'; then
+ echo -n -e '\033[32mPaX ASLR enabled\033[m\n\n'
+ else
+ echo -n -e '\033[31mPaX ASLR disabled\033[m\n\n'
+ fi
+ else
+ # standard Linux 'kernel.randomize_va_space' ASLR support
+ # (see the kernel file 'Documentation/sysctl/kernel.txt' for a detailed description)
+ printf " (kernel.randomize_va_space): "
+ if /sbin/sysctl -a 2>/dev/null | grep -q 'kernel.randomize_va_space = 1'; then
+ echo -n -e '\033[33mOn (Setting: 1)\033[m\n\n'
+ printf " Description - Make the addresses of mmap base, stack and VDSO page randomized.\n"
+ printf " This, among other things, implies that shared libraries will be loaded to \n"
+ printf " random addresses. Also for PIE-linked binaries, the location of code start\n"
+ printf " is randomized. Heap addresses are *not* randomized.\n\n"
+ elif /sbin/sysctl -a 2>/dev/null | grep -q 'kernel.randomize_va_space = 2'; then
+ echo -n -e '\033[32mOn (Setting: 2)\033[m\n\n'
+ printf " Description - Make the addresses of mmap base, heap, stack and VDSO page randomized.\n"
+ printf " This, among other things, implies that shared libraries will be loaded to random \n"
+ printf " addresses. Also for PIE-linked binaries, the location of code start is randomized.\n\n"
+ elif /sbin/sysctl -a 2>/dev/null | grep -q 'kernel.randomize_va_space = 0'; then
+ echo -n -e '\033[31mOff (Setting: 0)\033[m\n'
+ else
+ echo -n -e '\033[31mNot supported\033[m\n'
+ fi
+ printf " See the kernel file 'Documentation/sysctl/kernel.txt' for more details.\n\n"
+ fi
+}
+
+# check cpu nx flag
+nxcheck() {
+ if grep -q nx /proc/cpuinfo; then
+ echo -n -e '\033[32mYes\033[m\n\n'
+ else
+ echo -n -e '\033[31mNo\033[m\n\n'
+ fi
+}
+
+# check for kernel protection mechanisms
+kernelcheck() {
+ printf " Description - List the status of kernel protection mechanisms. Rather than\n"
+ printf " inspect kernel mechanisms that may aid in the prevention of exploitation of\n"
+ printf " userspace processes, this option lists the status of kernel configuration\n"
+ printf " options that harden the kernel itself against attack.\n\n"
+ printf " Kernel config: "
+
+ if [ -f /proc/config.gz ] ; then
+ kconfig="zcat /proc/config.gz"
+ printf "\033[32m/proc/config.gz\033[m\n\n"
+ elif [ -f /boot/config-`uname -r` ] ; then
+ kconfig="cat /boot/config-`uname -r`"
+ printf "\033[33m/boot/config-`uname -r`\033[m\n\n"
+ printf " Warning: The config on disk may not represent running kernel config!\n\n";
+ elif [ -f /usr/src/linux/.config ] ; then
+ kconfig="cat /usr/src/linux/.config"
+ printf "\033[33m/usr/src/linux/.config\033[m\n\n"
+ printf " Warning: The config on disk may not represent running kernel config!\n\n";
+ else
+ printf "\033[31mNOT FOUND\033[m\n\n"
+ exit 0
+ fi
+
+ printf " GCC stack protector support: "
+ if $kconfig | grep -qi 'CONFIG_CC_STACKPROTECTOR=y'; then
+ printf "\033[32mEnabled\033[m\n"
+ else
+ printf "\033[31mDisabled\033[m\n"
+ fi
+
+ printf " Strict user copy checks: "
+ if $kconfig | grep -qi 'CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y'; then
+ printf "\033[32mEnabled\033[m\n"
+ else
+ printf "\033[31mDisabled\033[m\n"
+ fi
+
+ printf " Enforce read-only kernel data: "
+ if $kconfig | grep -qi 'CONFIG_DEBUG_RODATA=y'; then
+ printf "\033[32mEnabled\033[m\n"
+ else
+ printf "\033[31mDisabled\033[m\n"
+ fi
+ printf " Restrict /dev/mem access: "
+ if $kconfig | grep -qi 'CONFIG_STRICT_DEVMEM=y'; then
+ printf "\033[32mEnabled\033[m\n"
+ else
+ printf "\033[31mDisabled\033[m\n"
+ fi
+
+ printf " Restrict /dev/kmem access: "
+ if $kconfig | grep -qi 'CONFIG_DEVKMEM=y'; then
+ printf "\033[31mDisabled\033[m\n"
+ else
+ printf "\033[32mEnabled\033[m\n"
+ fi
+
+ printf "\n"
+ printf "* grsecurity / PaX: "
+
+ if $kconfig | grep -qi 'CONFIG_GRKERNSEC=y'; then
+ if $kconfig | grep -qi 'CONFIG_GRKERNSEC_HIGH=y'; then
+ printf "\033[32mHigh GRKERNSEC\033[m\n\n"
+ elif $kconfig | grep -qi 'CONFIG_GRKERNSEC_MEDIUM=y'; then
+ printf "\033[33mMedium GRKERNSEC\033[m\n\n"
+ elif $kconfig | grep -qi 'CONFIG_GRKERNSEC_LOW=y'; then
+ printf "\033[31mLow GRKERNSEC\033[m\n\n"
+ else
+ printf "\033[33mCustom GRKERNSEC\033[m\n\n"
+ fi
+
+ printf " Non-executable kernel pages: "
+ if $kconfig | grep -qi 'CONFIG_PAX_KERNEXEC=y'; then
+ printf "\033[32mEnabled\033[m\n"
+ else
+ printf "\033[31mDisabled\033[m\n"
+ fi
+
+ printf " Prevent userspace pointer deref: "
+ if $kconfig | grep -qi 'CONFIG_PAX_MEMORY_UDEREF=y'; then
+ printf "\033[32mEnabled\033[m\n"
+ else
+ printf "\033[31mDisabled\033[m\n"
+ fi
+
+ printf " Prevent kobject refcount overflow: "
+ if $kconfig | grep -qi 'CONFIG_PAX_REFCOUNT=y'; then
+ printf "\033[32mEnabled\033[m\n"
+ else
+ printf "\033[31mDisabled\033[m\n"
+ fi
+
+ printf " Bounds check heap object copies: "
+ if $kconfig | grep -qi 'CONFIG_PAX_USERCOPY=y'; then
+ printf "\033[32mEnabled\033[m\n"
+ else
+ printf "\033[31mDisabled\033[m\n"
+ fi
+
+ printf " Disable writing to kmem/mem/port: "
+ if $kconfig | grep -qi 'CONFIG_GRKERNSEC_KMEM=y'; then
+ printf "\033[32mEnabled\033[m\n"
+ else
+ printf "\033[31mDisabled\033[m\n"
+ fi
+
+ printf " Disable privileged I/O: "
+ if $kconfig | grep -qi 'CONFIG_GRKERNSEC_IO=y'; then
+ printf "\033[32mEnabled\033[m\n"
+ else
+ printf "\033[31mDisabled\033[m\n"
+ fi
+
+ printf " Harden module auto-loading: "
+ if $kconfig | grep -qi 'CONFIG_GRKERNSEC_MODHARDEN=y'; then
+ printf "\033[32mEnabled\033[m\n"
+ else
+ printf "\033[31mDisabled\033[m\n"
+ fi
+
+ printf " Hide kernel symbols: "
+ if $kconfig | grep -qi 'CONFIG_GRKERNSEC_HIDESYM=y'; then
+ printf "\033[32mEnabled\033[m\n"
+ else
+ printf "\033[31mDisabled\033[m\n"
+ fi
+ else
+ printf "\033[31mNo GRKERNSEC\033[m\n\n"
+ printf " The grsecurity / PaX patchset is available here:\n"
+ printf " http://grsecurity.net/\n"
+ fi
+
+ printf "\n"
+ printf "* Kernel Heap Hardening: "
+
+ if $kconfig | grep -qi 'CONFIG_KERNHEAP=y'; then
+ if $kconfig | grep -qi 'CONFIG_KERNHEAP_FULLPOISON=y'; then
+ printf "\033[32mFull KERNHEAP\033[m\n\n"
+ else
+ printf "\033[33mPartial KERNHEAP\033[m\n\n"
+ fi
+ else
+ printf "\033[31mNo KERNHEAP\033[m\n\n"
+ printf " The KERNHEAP hardening patchset is available here:\n"
+ printf " https://www.subreption.com/kernheap/\n\n"
+ fi
+}
+
+# --- FORTIFY_SOURCE subfunctions (start) ---
+
+# is FORTIFY_SOURCE supported by libc?
+FS_libc_check() {
+ printf "* FORTIFY_SOURCE support available (libc) : "
+
+ if [ "${#FS_chk_func_libc[@]}" != "0" ] ; then
+ printf "\033[32mYes\033[m\n"
+ else
+ printf "\033[31mNo\033[m\n"
+ exit 1
+ fi
+}
+
+# was the binary compiled with FORTIFY_SOURCE?
+FS_binary_check() {
+ printf "* Binary compiled with FORTIFY_SOURCE support: "
+
+ for FS_elem_functions in $(seq 0 $((${#FS_functions[@]} - 1)))
+ do
+ if [[ ${FS_functions[$FS_elem_functions]} =~ _chk ]] ; then
+ printf "\033[32mYes\033[m\n"
+ return
+ fi
+ done
+ printf "\033[31mNo\033[m\n"
+ exit 1
+}
+
+FS_comparison() {
+ echo
+ printf " ------ EXECUTABLE-FILE ------- . -------- LIBC --------\n"
+ printf " FORTIFY-able library functions | Checked function names\n"
+ printf " -------------------------------------------------------\n"
+
+ for FS_elem_libc in $(seq 0 $((${#FS_chk_func_libc[@]} - 1)))
+ do
+ for FS_elem_functions in $(seq 0 $((${#FS_functions[@]} - 1)))
+ do
+ FS_tmp_func=${FS_functions[$FS_elem_functions]}
+ FS_tmp_libc=${FS_chk_func_libc[$FS_elem_libc]}
+
+ if [[ $FS_tmp_func =~ ^$FS_tmp_libc$ ]] ; then
+ printf " \033[31m%-30s\033[m | __%s%s\n" $FS_tmp_func $FS_tmp_libc $FS_end
+ let FS_cnt_total++
+ let FS_cnt_unchecked++
+ elif [[ $FS_tmp_func =~ ^$FS_tmp_libc(_chk) ]] ; then
+ printf " \033[32m%-30s\033[m | __%s%s\n" $FS_tmp_func $FS_tmp_libc $FS_end
+ let FS_cnt_total++
+ let FS_cnt_checked++
+ fi
+
+ done
+ done
+}
+
+FS_summary() {
+ echo
+ printf "SUMMARY:\n\n"
+ printf "* Number of checked functions in libc : ${#FS_chk_func_libc[@]}\n"
+ printf "* Total number of library functions in the executable: ${#FS_functions[@]}\n"
+ printf "* Number of FORTIFY-able functions in the executable : %s\n" $FS_cnt_total
+ printf "* Number of checked functions in the executable : \033[32m%s\033[m\n" $FS_cnt_checked
+ printf "* Number of unchecked functions in the executable : \033[31m%s\033[m\n" $FS_cnt_unchecked
+ echo
+}
+
+# --- FORTIFY_SOURCE subfunctions (end) ---
+
+if [ "$1" = "--version" ] ; then
+ version
+ exit 0
+fi
+
+if !(command_exists readelf) ; then
+ printf "\033[31mWarning: 'readelf' not found! It's required for most checks.\033[m\n\n"
+ have_readelf=0
+fi
+
+if [ "$3" = "-v" ] ; then
+ verbose=true
+fi
+
+if [ "$1" = "--dir" ] ; then
+ if [ $have_readelf -eq 0 ] ; then
+ exit 1
+ fi
+ if [ -z "$2" ] ; then
+ printf "\033[31mError: Please provide a valid directory.\033[m\n\n"
+ exit 1
+ fi
+ # remove trailing slashes
+ tempdir=`echo $2 | sed -e "s/\/*$//"`
+ if [ ! -d $tempdir ] ; then
+ printf "\033[31mError: The directory '$tempdir' does not exist.\033[m\n\n"
+ exit 1
+ fi
+ cd $tempdir
+ printf "RELRO STACK CANARY NX PIE FILE\n"
+ for N in [A-Za-z]*; do
+ if [ "$N" != "[A-Za-z]*" ]; then
+ # read permissions?
+ if [ ! -r $N ]; then
+ printf "\033[31mError: No read permissions for '$tempdir/$N' (run as root).\033[m\n"
+ else
+ # ELF executable?
+ out=`file $N`
+ if [[ ! $out =~ ELF ]] ; then
+ if [ "$verbose" = "true" ] ; then
+ printf "\033[34m*** Not an ELF file: $tempdir/"
+ file $N
+ printf "\033[m"
+ fi
+ else
+ filecheck $N
+ if [ `find $tempdir/$N \( -perm -004000 -o -perm -002000 \) -type f -print` ]; then
+ printf "\033[37;41m%s%s\033[m" $2 $N
+ else
+ printf "%s%s" $tempdir/ $N
+ fi
+ echo
+ fi
+ fi
+ fi
+ done
+ exit 0
+fi
+
+if [ "$1" = "--file" ] ; then
+ if [ $have_readelf -eq 0 ] ; then
+ exit 1
+ fi
+ if [ -z "$2" ] ; then
+ printf "\033[31mError: Please provide a valid file.\033[m\n\n"
+ exit 1
+ fi
+ # does the file exist?
+ if [ ! -e $2 ] ; then
+ printf "\033[31mError: The file '$2' does not exist.\033[m\n\n"
+ exit 1
+ fi
+ # read permissions?
+ if [ ! -r $2 ] ; then
+ printf "\033[31mError: No read permissions for '$2' (run as root).\033[m\n\n"
+ exit 1
+ fi
+ # ELF executable?
+ out=`file $2`
+ if [[ ! $out =~ ELF ]] ; then
+ printf "\033[31mError: Not an ELF file: "
+ file $2
+ printf "\033[m\n"
+ exit 1
+ fi
+ printf "RELRO STACK CANARY NX PIE FILE\n"
+ filecheck $2
+ if [ `find $2 \( -perm -004000 -o -perm -002000 \) -type f -print` ] ; then
+ printf "\033[37;41m%s%s\033[m" $2 $N
+ else
+ printf "%s" $2
+ fi
+ echo
+ exit 0
+fi
+
+if [ "$1" = "--proc-all" ] ; then
+ if [ $have_readelf -eq 0 ] ; then
+ exit 1
+ fi
+ cd /proc
+ printf "* System-wide ASLR"
+ aslrcheck
+ printf "* Does the CPU support NX: "
+ nxcheck
+ printf " COMMAND PID RELRO STACK CANARY NX/PaX PIE\n"
+ for N in [1-9]*; do
+ if [ $N != $$ ] && readlink -q $N/exe > /dev/null; then
+ printf "%16s" `head -1 $N/status | cut -b 7-`
+ printf "%7d " $N
+ proccheck $N
+ echo
+ fi
+ done
+ if [ ! -e /usr/bin/id ] ; then
+ printf "\n\033[33mNote: If you are running 'checksec.sh' as an unprivileged user, you\n"
+ printf " will not see all processes. Please run the script as root.\033[m\n\n"
+ else
+ if !(root_privs) ; then
+ printf "\n\033[33mNote: You are running 'checksec.sh' as an unprivileged user.\n"
+ printf " Too see all processes, please run the script as root.\033[m\n\n"
+ fi
+ fi
+ exit 0
+fi
+
+if [ "$1" = "--proc" ] ; then
+ if [ $have_readelf -eq 0 ] ; then
+ exit 1
+ fi
+ if [ -z "$2" ] ; then
+ printf "\033[31mError: Please provide a valid process name.\033[m\n\n"
+ exit 1
+ fi
+ if !(isString "$2") ; then
+ printf "\033[31mError: Please provide a valid process name.\033[m\n\n"
+ exit 1
+ fi
+ cd /proc
+ printf "* System-wide ASLR"
+ aslrcheck
+ printf "* Does the CPU support NX: "
+ nxcheck
+ printf " COMMAND PID RELRO STACK CANARY NX/PaX PIE\n"
+ for N in `ps -Ao pid,comm | grep $2 | cut -b1-6`; do
+ if [ -d $N ] ; then
+ printf "%16s" `head -1 $N/status | cut -b 7-`
+ printf "%7d " $N
+ # read permissions?
+ if [ ! -r $N/exe ] ; then
+ if !(root_privs) ; then
+ printf "\033[31mNo read permissions for '/proc/$N/exe' (run as root).\033[m\n\n"
+ exit 1
+ fi
+ if [ ! `readlink $N/exe` ] ; then
+ printf "\033[31mPermission denied. Requested process ID belongs to a kernel thread.\033[m\n\n"
+ exit 1
+ fi
+ exit 1
+ fi
+ proccheck $N
+ echo
+ fi
+ done
+ exit 0
+fi
+
+if [ "$1" = "--proc-libs" ] ; then
+ if [ $have_readelf -eq 0 ] ; then
+ exit 1
+ fi
+ if [ -z "$2" ] ; then
+ printf "\033[31mError: Please provide a valid process ID.\033[m\n\n"
+ exit 1
+ fi
+ if !(isNumeric "$2") ; then
+ printf "\033[31mError: Please provide a valid process ID.\033[m\n\n"
+ exit 1
+ fi
+ cd /proc
+ printf "* System-wide ASLR"
+ aslrcheck
+ printf "* Does the CPU support NX: "
+ nxcheck
+ printf "* Process information:\n\n"
+ printf " COMMAND PID RELRO STACK CANARY NX/PaX PIE\n"
+ N=$2
+ if [ -d $N ] ; then
+ printf "%16s" `head -1 $N/status | cut -b 7-`
+ printf "%7d " $N
+ # read permissions?
+ if [ ! -r $N/exe ] ; then
+ if !(root_privs) ; then
+ printf "\033[31mNo read permissions for '/proc/$N/exe' (run as root).\033[m\n\n"
+ exit 1
+ fi
+ if [ ! `readlink $N/exe` ] ; then
+ printf "\033[31mPermission denied. Requested process ID belongs to a kernel thread.\033[m\n\n"
+ exit 1
+ fi
+ exit 1
+ fi
+ proccheck $N
+ echo
+ libcheck $N
+ fi
+ exit 0
+fi
+
+if [ "$1" = "--kernel" ] ; then
+ cd /proc
+ printf "* Kernel protection information:\n\n"
+ kernelcheck
+ exit 0
+fi
+
+if [ "$1" = "--fortify-file" ] ; then
+ if [ $have_readelf -eq 0 ] ; then
+ exit 1
+ fi
+ if [ -z "$2" ] ; then
+ printf "\033[31mError: Please provide a valid file.\033[m\n\n"
+ exit 1
+ fi
+ # does the file exist?
+ if [ ! -e $2 ] ; then
+ printf "\033[31mError: The file '$2' does not exist.\033[m\n\n"
+ exit 1
+ fi
+ # read permissions?
+ if [ ! -r $2 ] ; then
+ printf "\033[31mError: No read permissions for '$2' (run as root).\033[m\n\n"
+ exit 1
+ fi
+ # ELF executable?
+ out=`file $2`
+ if [[ ! $out =~ ELF ]] ; then
+ printf "\033[31mError: Not an ELF file: "
+ file $2
+ printf "\033[m\n"
+ exit 1
+ fi
+ if [ -e /lib/libc.so.6 ] ; then
+ FS_libc=/lib/libc.so.6
+ elif [ -e /lib64/libc.so.6 ] ; then
+ FS_libc=/lib64/libc.so.6
+ else
+ printf "\033[31mError: libc not found.\033[m\n\n"
+ exit 1
+ fi
+
+ FS_chk_func_libc=( $(readelf -s $FS_libc | grep _chk@@ | awk '{ print $8 }' | cut -c 3- | sed -e 's/_chk@.*//') )
+ FS_functions=( $(readelf -s $2 | awk '{ print $8 }' | sed 's/_*//' | sed -e 's/@.*//') )
+
+ FS_libc_check
+ FS_binary_check
+ FS_comparison
+ FS_summary
+
+ exit 0
+fi
+
+if [ "$1" = "--fortify-proc" ] ; then
+ if [ $have_readelf -eq 0 ] ; then
+ exit 1
+ fi
+ if [ -z "$2" ] ; then
+ printf "\033[31mError: Please provide a valid process ID.\033[m\n\n"
+ exit 1
+ fi
+ if !(isNumeric "$2") ; then
+ printf "\033[31mError: Please provide a valid process ID.\033[m\n\n"
+ exit 1
+ fi
+ cd /proc
+ N=$2
+ if [ -d $N ] ; then
+ # read permissions?
+ if [ ! -r $N/exe ] ; then
+ if !(root_privs) ; then
+ printf "\033[31mNo read permissions for '/proc/$N/exe' (run as root).\033[m\n\n"
+ exit 1
+ fi
+ if [ ! `readlink $N/exe` ] ; then
+ printf "\033[31mPermission denied. Requested process ID belongs to a kernel thread.\033[m\n\n"
+ exit 1
+ fi
+ exit 1
+ fi
+ if [ -e /lib/libc.so.6 ] ; then
+ FS_libc=/lib/libc.so.6
+ elif [ -e /lib64/libc.so.6 ] ; then
+ FS_libc=/lib64/libc.so.6
+ else
+ printf "\033[31mError: libc not found.\033[m\n\n"
+ exit 1
+ fi
+ printf "* Process name (PID) : %s (%d)\n" `head -1 $N/status | cut -b 7-` $N
+ FS_chk_func_libc=( $(readelf -s $FS_libc | grep _chk@@ | awk '{ print $8 }' | cut -c 3- | sed -e 's/_chk@.*//') )
+ FS_functions=( $(readelf -s $2/exe | awk '{ print $8 }' | sed 's/_*//' | sed -e 's/@.*//') )
+
+ FS_libc_check
+ FS_binary_check
+ FS_comparison
+ FS_summary
+
+ fi
+ exit 0
+fi
+
+printf "\033[31mError: Unknown option '$1'.\033[m\n\n"
+exit 1
diff --git a/clipbored b/clipbored
new file mode 100755
index 0000000..5a29413
--- a/dev/null
+++ b/clipbored
@@ -0,0 +1,186 @@
+#!/usr/bin/perl
+use strict;
+use Getopt::Long;
+use Pod::Usage;
+use File::Path 'make_path';
+
+my $xsel_log = "$ENV{XDG_DATA_HOME}/clipbored/clips";
+my $pidfile = '/tmp/clipbored.pid';
+
+if(!-f $xsel_log) {
+ make_path("$ENV{XDG_DATA_HOME}/clipbored");
+ open(my $fh, '>', $xsel_log) or die($!);
+ close($fh);
+}
+
+our($opt_no_daemon) = undef;
+GetOptions(
+ 'no-daemon' => \$opt_no_daemon,
+ 'kill' => \&killkid,
+ 'clear' => sub {
+ if(-e $xsel_log) {
+ open (my $fh, '>', $xsel_log) or die("Could not open $xsel_log: $!");
+ close($fh);
+ }
+ print "$xsel_log cleared\n";
+ exit(0);
+ },
+ 'last:i' => \&lastlog,
+ 'help' => sub {pod2usage(-verbose => 1) and exit(0)},
+ 'man' => sub {pod2usage(-verbose => 3) and exit(0)},
+);
+
+if(-f $pidfile) {
+ print "clipbored is already running\n";
+ exit(1);
+}
+
+sync_cb();
+
+sub lastlog {
+ shift;
+ my $wayback = shift // 25;
+ if($wayback < 1) {
+ $wayback = 25;
+ }
+ open(my $fh, '<', $xsel_log) or die("Could not open $xsel_log: $!");
+ my @records = <$fh>;
+ close($fh);
+
+ if(scalar(@records) < $wayback) {
+ $wayback = scalar(@records);
+ }
+
+ my $i = 0;
+ for(@records[0 .. $wayback-1]) {
+ printf("\e[1m%2d\e[0m %s", $i, $_);
+ $i++;
+ }
+ exit(0);
+}
+
+
+sub sync_cb {
+ daemonize() unless(defined($opt_no_daemon));
+ while(1) {
+ chomp(my $current_selection = `/usr/bin/xclip -o`);
+ if(defined($current_selection)) {
+ open(my $r_xsel, '<', $xsel_log) or die("Cant open $xsel_log: $!");
+ chomp(my @selections = <$r_xsel>);
+ close($r_xsel);
+ $current_selection =~ s/\n/ /g; # newline hassle
+
+ if($current_selection ~~ @selections) {
+
+ }
+ else {
+ open(my $a_xsel, '>>', $xsel_log) or die("Cant open $xsel_log: $!");
+ print $a_xsel $current_selection, "\n";
+ close($a_xsel);
+ print $current_selection, "\n" if(defined($opt_no_daemon));
+ }
+ }
+ else {
+ }
+ sleep 2;
+ }
+}
+
+sub killkid {
+ open(my $fh, '<', $pidfile) or print "clipbored is not running\n" and exit(1);
+ my $target = <$fh>;
+ close($fh);
+
+ if(kill(9, $target)) {
+ print "clipbored with PID $target terminated\n";
+ }
+ else {
+ print "Could not kill $target: $!";
+ }
+ exit(0);
+}
+
+sub daemonize {
+ use POSIX 'setsid';
+ my $PID = fork();
+ exit(0) if($PID); #parent
+ exit(1) if(!defined($PID)); # out of resources
+
+ setsid();
+ $PID = fork();
+ exit(1) if(!defined($PID));
+
+ if($PID) { # parent
+ waitpid($PID, 0);
+ unlink($pidfile); # remove the lock when child have died
+ exit(0);
+ }
+ elsif($PID == 0) { # child
+ open(my $fh, '>', $pidfile) or die("Cant open $pidfile: $!");
+ print $fh $$;
+ close($fh);
+ open(STDOUT, '>', '/dev/null');
+ open(STDERR, '>', '/dev/null');
+ open(STDIN, '<', '/dev/null');
+ }
+}
+
+
+=pod
+
+=head1 NAME
+
+ clipbored - continuously collects all selections in Xorg's clipboard buffers
+
+=head1 SYNOPSIS
+
+ clipbored [OPTIONS]
+
+=head1 DESCRIPTION
+
+B<clipbored> is a daemon that continuously grabs all non-duplicate selections
+in the X.org clipboard buffers and writes them to a plaintext history file for
+later use.
+
+There are several scripts distributed with clipbored that'll use the history
+file for different purposes.
+
+=head2 Scripts
+
+ dmenurl - launch dmenu with all previously yanked URLs for you to select
+ from.
+
+ dmenuclip - launch dmenu listing all previously clipboarded content
+
+ fmenuclip - do the same thing but vertically
+
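+=head2 Example
+
+A minimal session might look like this (sketch; dmenurl is one of the helper
+scripts listed above):
+
+  clipbored            # start the collector in the background
+  clipbored --last 5   # print the five most recent clips
+  dmenurl              # reuse a previously yanked URL via dmenu
+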
+=head1 OPTIONS
+
+ -l, --last show the n latest additions
+ -c, --clear clear all history
+ -n, --no-daemon do not detach from the shell
+ -k, --kill kill a running clipbored session
+ -h, --help show this help
+ -m, --man display the manual
+
+=head1 ENVIRONMENT
+
+The history file will be placed in $XDG_DATA_HOME/clipbored/clips
+
+=head1 AUTHOR
+
+Written by Magnus Woldrich.
+
+=head1 REPORTING BUGS
+
+Report bugs to [email protected]
+
+clipbored home page: <http://github.com/trapd00r/clipbored/>
+
+=head1 COPYRIGHT
+
+(C) Copyright 2010 Magnus Woldrich.
+
+License GPLv2: GNU GPL version 2
+
+=cut
diff --git a/colorit b/colorit
new file mode 100755
index 0000000..627d310
--- a/dev/null
+++ b/colorit
@@ -0,0 +1,30 @@
+#!/bin/zsh
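+# Highlight, on each line read from stdin, any word that appears in the $words
+# list; an entry may carry a 256-colour index as word:NNN.
+# Hypothetical invocation ($words is expected to be supplied by the caller):
+#   words='ERROR:196 WARNING:226' colorit < build.log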
+autoload -U colors
+colors
+#set -x
+: ${(A)elements:=${(z)words}}
+while read LINE
+do
+ string=""
+ #LINE="${LINE// /%}"
+ #print $LINE
+ for word in ${=LINE}
+ do
+ color="39"
+ word=${(q)word}
+ #set -x
+ if (( ${+elements[(r)*(#i)$word*]} ));then
+ temp=${elements[(r)$word:*]}
+ if [[ $temp == *:* ]];then
+ color=${temp#*:}
+ fi
+ string+="%{%B%K{233}%F{$color}%}$word%{$reset_color%} "
+ else
+ string+="$word "
+ fi
+ #set +x
+ done
+ print -P $string
+done
+#set +x
diff --git a/colortest b/colortest
new file mode 100755
index 0000000..5762112
--- a/dev/null
+++ b/colortest
@@ -0,0 +1,365 @@
+#!/usr/bin/perl
+
+# by entheon, do whatever the hell you want with this file
+
+print "\n";
+print "**************************\n";
+print "*XTERM 256Color Test Chart\n";
+print "**************************\n";
+print "* 16 = black\n";
+print "* 255 = white\n";
+print "*\n";
+print "* Usage:\n";
+print "* colortest -w\n";
+print "* wide display\n";
+print "*\n";
+print "* colortest -w -r\n";
+print "* wide display reversed\n";
+print "*\n";
+print "* colortest -w -s\n";
+print "* extra spaces padding\n";
+print "*\n";
+print "* colortest -w -r -s\n";
+print "* available combination\n";
+print "*\n";
+print "**************************\n";
+
+if( $ARGV[0] eq "-w" || $ARGV[1] eq "-w" || $ARGV[2] eq "-w" ) {
+ push(@arr, [( " 16: 00/00/00", " 17: 00/00/5f", " 18: 00/00/87", " 19: 00/00/af", " 20: 00/00/d7", " 21: 00/00/ff")] );
+ push(@arr, [( " 22: 00/5f/00", " 23: 00/5f/5f", " 24: 00/5f/87", " 25: 00/5f/af", " 26: 00/5f/d7", " 27: 00/5f/ff")] );
+ push(@arr, [( " 28: 00/87/00", " 29: 00/87/5f", " 30: 00/87/87", " 31: 00/87/af", " 32: 00/87/d7", " 33: 00/87/ff")] );
+ push(@arr, [( " 34: 00/af/00", " 35: 00/af/5f", " 36: 00/af/87", " 37: 00/af/af", " 38: 00/af/d7", " 39: 00/af/ff")] );
+ push(@arr, [( " 40: 00/d7/00", " 41: 00/d7/5f", " 42: 00/d7/87", " 43: 00/d7/af", " 44: 00/d7/d7", " 45: 00/d7/ff")] );
+ push(@arr, [( " 46: 00/ff/00", " 47: 00/ff/5f", " 48: 00/ff/87", " 49: 00/ff/af", " 50: 00/ff/d7", " 51: 00/ff/ff")] );
+ push(@arr, [( " 52: 5f/00/00", " 53: 5f/00/5f", " 54: 5f/00/87", " 55: 5f/00/af", " 56: 5f/00/d7", " 57: 5f/00/ff")] );
+ push(@arr, [( " 58: 5f/5f/00", " 59: 5f/5f/5f", " 60: 5f/5f/87", " 61: 5f/5f/af", " 62: 5f/5f/d7", " 63: 5f/5f/ff")] );
+ push(@arr, [( " 64: 5f/87/00", " 65: 5f/87/5f", " 66: 5f/87/87", " 67: 5f/87/af", " 68: 5f/87/d7", " 69: 5f/87/ff")] );
+ push(@arr, [( " 70: 5f/af/00", " 71: 5f/af/5f", " 72: 5f/af/87", " 73: 5f/af/af", " 74: 5f/af/d7", " 75: 5f/af/ff")] );
+ push(@arr, [( " 76: 5f/d7/00", " 77: 5f/d7/5f", " 78: 5f/d7/87", " 79: 5f/d7/af", " 80: 5f/d7/d7", " 81: 5f/d7/ff")] );
+ push(@arr, [( " 82: 5f/ff/00", " 83: 5f/ff/5f", " 84: 5f/ff/87", " 85: 5f/ff/af", " 86: 5f/ff/d7", " 87: 5f/ff/ff")] );
+ push(@arr, [( " 88: 87/00/00", " 89: 87/00/5f", " 90: 87/00/87", " 91: 87/00/af", " 92: 87/00/d7", " 93: 87/00/ff")] );
+ push(@arr, [( " 94: 87/5f/00", " 95: 87/5f/5f", " 96: 87/5f/87", " 97: 87/5f/af", " 98: 87/5f/d7", " 99: 87/5f/ff")] );
+ push(@arr, [( " 100: 87/87/00", " 101: 87/87/5f", " 102: 87/87/87", " 103: 87/87/af", " 104: 87/87/d7", " 105: 87/87/ff")] );
+ push(@arr, [( " 106: 87/af/00", " 107: 87/af/5f", " 108: 87/af/87", " 109: 87/af/af", " 110: 87/af/d7", " 111: 87/af/ff")] );
+ push(@arr, [( " 112: 87/d7/00", " 113: 87/d7/5f", " 114: 87/d7/87", " 115: 87/d7/af", " 116: 87/d7/d7", " 117: 87/d7/ff")] );
+ push(@arr, [( " 118: 87/ff/00", " 119: 87/ff/5f", " 120: 87/ff/87", " 121: 87/ff/af", " 122: 87/ff/d7", " 123: 87/ff/ff")] );
+ push(@arr, [( " 124: af/00/00", " 125: af/00/5f", " 126: af/00/87", " 127: af/00/af", " 128: af/00/d7", " 129: af/00/ff")] );
+ push(@arr, [( " 130: af/5f/00", " 131: af/5f/5f", " 132: af/5f/87", " 133: af/5f/af", " 134: af/5f/d7", " 135: af/5f/ff")] );
+ push(@arr, [( " 136: af/87/00", " 137: af/87/5f", " 138: af/87/87", " 139: af/87/af", " 140: af/87/d7", " 141: af/87/ff")] );
+ push(@arr, [( " 142: af/af/00", " 143: af/af/5f", " 144: af/af/87", " 145: af/af/af", " 146: af/af/d7", " 147: af/af/ff")] );
+ push(@arr, [( " 148: af/d7/00", " 149: af/d7/5f", " 150: af/d7/87", " 151: af/d7/af", " 152: af/d7/d7", " 153: af/d7/ff")] );
+ push(@arr, [( " 154: af/ff/00", " 155: af/ff/5f", " 156: af/ff/87", " 157: af/ff/af", " 158: af/ff/d7", " 159: af/ff/ff")] );
+ push(@arr, [( " 160: d7/00/00", " 161: d7/00/5f", " 162: d7/00/87", " 163: d7/00/af", " 164: d7/00/d7", " 165: d7/00/ff")] );
+ push(@arr, [( " 166: d7/5f/00", " 167: d7/5f/5f", " 168: d7/5f/87", " 169: d7/5f/af", " 170: d7/5f/d7", " 171: d7/5f/ff")] );
+ push(@arr, [( " 172: d7/87/00", " 173: d7/87/5f", " 174: d7/87/87", " 175: d7/87/af", " 176: d7/87/d7", " 177: d7/87/ff")] );
+ push(@arr, [( " 178: d7/af/00", " 179: d7/af/5f", " 180: d7/af/87", " 181: d7/af/af", " 182: d7/af/d7", " 183: d7/af/ff")] );
+ push(@arr, [( " 184: d7/d7/00", " 185: d7/d7/5f", " 186: d7/d7/87", " 187: d7/d7/af", " 188: d7/d7/d7", " 189: d7/d7/ff")] );
+ push(@arr, [( " 190: d7/ff/00", " 191: d7/ff/5f", " 192: d7/ff/87", " 193: d7/ff/af", " 194: d7/ff/d7", " 195: d7/ff/ff")] );
+ push(@arr, [( " 196: ff/00/00", " 197: ff/00/5f", " 198: ff/00/87", " 199: ff/00/af", " 200: ff/00/d7", " 201: ff/00/ff")] );
+ push(@arr, [( " 202: ff/5f/00", " 203: ff/5f/5f", " 204: ff/5f/87", " 205: ff/5f/af", " 206: ff/5f/d7", " 207: ff/5f/ff")] );
+ push(@arr, [( " 208: ff/87/00", " 209: ff/87/5f", " 210: ff/87/87", " 211: ff/87/af", " 212: ff/87/d7", " 213: ff/87/ff")] );
+ push(@arr, [( " 214: ff/af/00", " 215: ff/af/5f", " 216: ff/af/87", " 217: ff/af/af", " 218: ff/af/d7", " 219: ff/af/ff")] );
+ push(@arr, [( " 220: ff/d7/00", " 221: ff/d7/5f", " 222: ff/d7/87", " 223: ff/d7/af", " 224: ff/d7/d7", " 225: ff/d7/ff")] );
+ push(@arr, [( " 226: ff/ff/00", " 227: ff/ff/5f", " 228: ff/ff/87", " 229: ff/ff/af", " 230: ff/ff/d7", " 231: ff/ff/ff")] );
+ push(@arr, [( " 232: 08/08/08", " 233: 12/12/12", " 234: 1c/1c/1c", " 235: 26/26/26", " 236: 30/30/30", " 237: 3a/3a/3a")] );
+ push(@arr, [( " 238: 44/44/44", " 239: 4e/4e/4e", " 240: 58/58/58", " 241: 62/62/62", " 242: 6c/6c/6c", " 243: 76/76/76")] );
+ push(@arr, [( " 244: 80/80/80", " 245: 8a/8a/8a", " 246: 94/94/94", " 247: 9e/9e/9e", " 248: a8/a8/a8", " 249: b2/b2/b2")] );
+ push(@arr, [( " 250: bc/bc/bc", " 251: c6/c6/c6", " 252: d0/d0/d0", " 253: da/da/da", " 254: e4/e4/e4", " 255: ee/ee/ee")] );
+
+ if( $ARGV[0] eq "-s" || $ARGV[1] eq "-s" || $ARGV[2] eq "-s" ){
+ $padding = " ";
+ }
+ else {
+
+ }
+
+ # display in reverse order
+ if( $ARGV[0] eq "-r" || $ARGV[1] eq "-r" || $ARGV[2] eq "-r" ){
+ for( $dimone = 0; $dimone < scalar @arr; $dimone++ ) {
+
+ $seed = ($dimone % 6) * -1;
+ for( $dimtwo = 0; $dimtwo < 6; $dimtwo++ ) {
+
+ $movone = $seed;
+ $movtwo = $seed * -1;
+
+ print $arr[$dimone][$dimtwo] . $padding;
+
+ $seed = $seed+1;
+ }
+
+ print "\n";
+ }
+ }
+ else {
+ for( $dimone = 0; $dimone < scalar @arr; $dimone++ ) {
+
+ $seed = ($dimone % 6) * -1;
+ for( $dimtwo = 0; $dimtwo < 6; $dimtwo++ ) {
+
+ $movone = $seed;
+ $movtwo = $seed * -1;
+
+ $newone = $dimone+$movone;
+ $newtwo = $dimtwo+$movtwo;
+
+ if( $newone < scalar @arr ){
+ print $arr[$newone][$newtwo] . $padding;
+ }
+
+ $seed = $seed+1;
+ }
+
+ print "\n";
+ }
+ }
+ print "\n";
+ print "\n";
+
+}
+else {
+ print " 16: 00/00/00\n";
+ print " 17: 00/00/5f\n";
+ print " 18: 00/00/87\n";
+ print " 19: 00/00/af\n";
+ print " 20: 00/00/d7\n";
+ print " 21: 00/00/ff\n";
+ print " 22: 00/5f/00\n";
+ print " 23: 00/5f/5f\n";
+ print " 24: 00/5f/87\n";
+ print " 25: 00/5f/af\n";
+ print " 26: 00/5f/d7\n";
+ print " 27: 00/5f/ff\n";
+ print " 28: 00/87/00\n";
+ print " 29: 00/87/5f\n";
+ print " 30: 00/87/87\n";
+ print " 31: 00/87/af\n";
+ print " 32: 00/87/d7\n";
+ print " 33: 00/87/ff\n";
+ print " 34: 00/af/00\n";
+ print " 35: 00/af/5f\n";
+ print " 36: 00/af/87\n";
+ print " 37: 00/af/af\n";
+ print " 38: 00/af/d7\n";
+ print " 39: 00/af/ff\n";
+ print " 40: 00/d7/00\n";
+ print " 41: 00/d7/5f\n";
+ print " 42: 00/d7/87\n";
+ print " 43: 00/d7/af\n";
+ print " 44: 00/d7/d7\n";
+ print " 45: 00/d7/ff\n";
+ print " 46: 00/ff/00\n";
+ print " 47: 00/ff/5f\n";
+ print " 48: 00/ff/87\n";
+ print " 49: 00/ff/af\n";
+ print " 50: 00/ff/d7\n";
+ print " 51: 00/ff/ff\n";
+ print " 52: 5f/00/00\n";
+ print " 53: 5f/00/5f\n";
+ print " 54: 5f/00/87\n";
+ print " 55: 5f/00/af\n";
+ print " 56: 5f/00/d7\n";
+ print " 57: 5f/00/ff\n";
+ print " 58: 5f/5f/00\n";
+ print " 59: 5f/5f/5f\n";
+ print " 60: 5f/5f/87\n";
+ print " 61: 5f/5f/af\n";
+ print " 62: 5f/5f/d7\n";
+ print " 63: 5f/5f/ff\n";
+ print " 64: 5f/87/00\n";
+ print " 65: 5f/87/5f\n";
+ print " 66: 5f/87/87\n";
+ print " 67: 5f/87/af\n";
+ print " 68: 5f/87/d7\n";
+ print " 69: 5f/87/ff\n";
+ print " 70: 5f/af/00\n";
+ print " 71: 5f/af/5f\n";
+ print " 72: 5f/af/87\n";
+ print " 73: 5f/af/af\n";
+ print " 74: 5f/af/d7\n";
+ print " 75: 5f/af/ff\n";
+ print " 76: 5f/d7/00\n";
+ print " 77: 5f/d7/5f\n";
+ print " 78: 5f/d7/87\n";
+ print " 79: 5f/d7/af\n";
+ print " 80: 5f/d7/d7\n";
+ print " 81: 5f/d7/ff\n";
+ print " 82: 5f/ff/00\n";
+ print " 83: 5f/ff/5f\n";
+ print " 84: 5f/ff/87\n";
+ print " 85: 5f/ff/af\n";
+ print " 86: 5f/ff/d7\n";
+ print " 87: 5f/ff/ff\n";
+ print " 88: 87/00/00\n";
+ print " 89: 87/00/5f\n";
+ print " 90: 87/00/87\n";
+ print " 91: 87/00/af\n";
+ print " 92: 87/00/d7\n";
+ print " 93: 87/00/ff\n";
+ print " 94: 87/5f/00\n";
+ print " 95: 87/5f/5f\n";
+ print " 96: 87/5f/87\n";
+ print " 97: 87/5f/af\n";
+ print " 98: 87/5f/d7\n";
+ print " 99: 87/5f/ff\n";
+ print " 100 :87/87/00\n";
+ print " 101 :87/87/5f\n";
+ print " 102 :87/87/87\n";
+ print " 103 :87/87/af\n";
+ print " 104 :87/87/d7\n";
+ print " 105 :87/87/ff\n";
+ print " 106 :87/af/00\n";
+ print " 107 :87/af/5f\n";
+ print " 108 :87/af/87\n";
+ print " 109 :87/af/af\n";
+ print " 110 :87/af/d7\n";
+ print " 111 :87/af/ff\n";
+ print " 112 :87/d7/00\n";
+ print " 113 :87/d7/5f\n";
+ print " 114 :87/d7/87\n";
+ print " 115 :87/d7/af\n";
+ print " 116 :87/d7/d7\n";
+ print " 117 :87/d7/ff\n";
+ print " 118 :87/ff/00\n";
+ print " 119 :87/ff/5f\n";
+ print " 120 :87/ff/87\n";
+ print " 121 :87/ff/af\n";
+ print " 122 :87/ff/d7\n";
+ print " 123 :87/ff/ff\n";
+ print " 124 :af/00/00\n";
+ print " 125 :af/00/5f\n";
+ print " 126 :af/00/87\n";
+ print " 127 :af/00/af\n";
+ print " 128 :af/00/d7\n";
+ print " 129 :af/00/ff\n";
+ print " 130 :af/5f/00\n";
+ print " 131 :af/5f/5f\n";
+ print " 132 :af/5f/87\n";
+ print " 133 :af/5f/af\n";
+ print " 134 :af/5f/d7\n";
+ print " 135 :af/5f/ff\n";
+ print " 136 :af/87/00\n";
+ print " 137 :af/87/5f\n";
+ print " 138 :af/87/87\n";
+ print " 139 :af/87/af\n";
+ print " 140 :af/87/d7\n";
+ print " 141 :af/87/ff\n";
+ print " 142 :af/af/00\n";
+ print " 143 :af/af/5f\n";
+ print " 144 :af/af/87\n";
+ print " 145 :af/af/af\n";
+ print " 146 :af/af/d7\n";
+ print " 147 :af/af/ff\n";
+ print " 148 :af/d7/00\n";
+ print " 149 :af/d7/5f\n";
+ print " 150 :af/d7/87\n";
+ print " 151 :af/d7/af\n";
+ print " 152 :af/d7/d7\n";
+ print " 153 :af/d7/ff\n";
+ print " 154 :af/ff/00\n";
+ print " 155 :af/ff/5f\n";
+ print " 156 :af/ff/87\n";
+ print " 157 :af/ff/af\n";
+ print " 158 :af/ff/d7\n";
+ print " 159 :af/ff/ff\n";
+ print " 160 :d7/00/00\n";
+ print " 161 :d7/00/5f\n";
+ print " 162 :d7/00/87\n";
+ print " 163 :d7/00/af\n";
+ print " 164 :d7/00/d7\n";
+ print " 165 :d7/00/ff\n";
+ print " 166 :d7/5f/00\n";
+ print " 167 :d7/5f/5f\n";
+ print " 168 :d7/5f/87\n";
+ print " 169 :d7/5f/af\n";
+ print " 170 :d7/5f/d7\n";
+ print " 171 :d7/5f/ff\n";
+ print " 172 :d7/87/00\n";
+ print " 173 :d7/87/5f\n";
+ print " 174 :d7/87/87\n";
+ print " 175 :d7/87/af\n";
+ print " 176 :d7/87/d7\n";
+ print " 177 :d7/87/ff\n";
+ print " 178 :d7/af/00\n";
+ print " 179 :d7/af/5f\n";
+ print " 180 :d7/af/87\n";
+ print " 181 :d7/af/af\n";
+ print " 182 :d7/af/d7\n";
+ print " 183 :d7/af/ff\n";
+ print " 184 :d7/d7/00\n";
+ print " 185 :d7/d7/5f\n";
+ print " 186 :d7/d7/87\n";
+ print " 187 :d7/d7/af\n";
+ print " 188 :d7/d7/d7\n";
+ print " 189 :d7/d7/ff\n";
+ print " 190 :d7/ff/00\n";
+ print " 191 :d7/ff/5f\n";
+ print " 192 :d7/ff/87\n";
+ print " 193 :d7/ff/af\n";
+ print " 194 :d7/ff/d7\n";
+ print " 195 :d7/ff/ff\n";
+ print " 196 :ff/00/00\n";
+ print " 197 :ff/00/5f\n";
+ print " 198 :ff/00/87\n";
+ print " 199 :ff/00/af\n";
+ print " 200 :ff/00/d7\n";
+ print " 201 :ff/00/ff\n";
+ print " 202 :ff/5f/00\n";
+ print " 203 :ff/5f/5f\n";
+ print " 204 :ff/5f/87\n";
+ print " 205 :ff/5f/af\n";
+ print " 206 :ff/5f/d7\n";
+ print " 207 :ff/5f/ff\n";
+ print " 208 :ff/87/00\n";
+ print " 209 :ff/87/5f\n";
+ print " 210 :ff/87/87\n";
+ print " 211 :ff/87/af\n";
+ print " 212 :ff/87/d7\n";
+ print " 213 :ff/87/ff\n";
+ print " 214 :ff/af/00\n";
+ print " 215 :ff/af/5f\n";
+ print " 216 :ff/af/87\n";
+ print " 217 :ff/af/af\n";
+ print " 218 :ff/af/d7\n";
+ print " 219 :ff/af/ff\n";
+ print " 220 :ff/d7/00\n";
+ print " 221 :ff/d7/5f\n";
+ print " 222 :ff/d7/87\n";
+ print " 223 :ff/d7/af\n";
+ print " 224 :ff/d7/d7\n";
+ print " 225 :ff/d7/ff\n";
+ print " 226 :ff/ff/00\n";
+ print " 227 :ff/ff/5f\n";
+ print " 228 :ff/ff/87\n";
+ print " 229 :ff/ff/af\n";
+ print " 230 :ff/ff/d7\n";
+ print " 231 :ff/ff/ff\n";
+ print " 232 :08/08/08\n";
+ print " 233 :12/12/12\n";
+ print " 234 :1c/1c/1c\n";
+ print " 235 :26/26/26\n";
+ print " 236 :30/30/30\n";
+ print " 237 :3a/3a/3a\n";
+ print " 238 :44/44/44\n";
+ print " 239 :4e/4e/4e\n";
+ print " 240 :58/58/58\n";
+ print " 241 :62/62/62\n";
+ print " 242 :6c/6c/6c\n";
+ print " 243 :76/76/76\n";
+ print " 244 :80/80/80\n";
+ print " 245 :8a/8a/8a\n";
+ print " 246 :94/94/94\n";
+ print " 247 :9e/9e/9e\n";
+ print " 248 :a8/a8/a8\n";
+ print " 249 :b2/b2/b2\n";
+ print " 250 :bc/bc/bc\n";
+ print " 251 :c6/c6/c6\n";
+ print " 252 :d0/d0/d0\n";
+ print " 253 :da/da/da\n";
+ print " 254 :e4/e4/e4\n";
+ print " 255 :ee/ee/ee\n";
+ print "\n";
+ print "\n";
+}
+print "0m";
+exit;
diff --git a/consume b/consume
new file mode 100755
index 0000000..a180434
--- a/dev/null
+++ b/consume
@@ -0,0 +1,39 @@
+#!/usr/bin/env perl
+
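+# Run the first line of the given queue file that is not yet marked with a
+# leading '%', then mark it as consumed; stdout/stderr go to ~/logs/consume.log.
+# Illustrative call (the queue file name is arbitrary):
+#   consume ~/queue.txt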
+use Tie::File;
+use IO::Handle;
+$FPATH="$ENV{'HOME'}/logs/consume.log";
+
+open OUTPUT, '>', $FPATH or die $!;
+open ERROR, '>', $FPATH or die $!;
+
+STDOUT->fdopen( \*OUTPUT, 'w' ) or die $!;
+STDERR->fdopen( \*ERROR, 'w' ) or die $!;
+
+$filename=$ARGV[0];
+#print $filename;
+my @lines;
+my $crumb;
+
+tie @lines, 'Tie::File', $filename or die "Tieing failed : $!";
+if (scalar(@lines) == 0){
+ untie @lines;
+ print "Gulp! Nothing more to consume";
+ exit 127;
+}
+
+$counter = 0;
+foreach(@lines){
+ $counter++;
+ if(/^[^%]/){
+ # First non consumed line
+ $crumb = $_;
+ s/^/%/;
+ last;
+ }
+}
+untie @lines;
+#print $crumb;
+chomp($crumb);
+$crumb and system($crumb) == 0 or print "$crumb execution failed $!";
+
diff --git a/corer b/corer
new file mode 100755
index 0000000..d1411e3
--- a/dev/null
+++ b/corer
@@ -0,0 +1,28 @@
+#!/bin/zsh
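+# Keep a bounded collection of core dumps under ~/Arch/cores/ and log each one
+# to ~/Arch/.cores. $args is assumed to arrive from the caller as a
+# colon-separated "time:file:pid:signal" record, e.g. via a kernel.core_pattern
+# helper (assumption; $args is not set inside this script).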
+
+
+>| /tmp/.core
+
+
+params=(${(s|:|)args})
+
+CORE_DIR="/home/raghavendra/Arch/cores/"
+
+temp=($CORE_DIR/*)
+
+if [[ $#temp -gt 20 ]];then
+ echo "NO CORE file for now. Space isn't infinite" >>| ~/Arch/.cores
+ rm /tmp/.core
+else
+ mv /tmp/.core ~/Arch/cores/$args.core
+fi
+echo "Time $(date -d @$params[1]):
+
+ File $params[2]
+ with PID $params[3]
+and killed by $params[4]
+Moved into ~/Arch/cores/$args.core " >>| ~/Arch/.cores
+
+echo "==========================
+ ===========================" >>| ~/Arch/.cores
diff --git a/cpustat b/cpustat
new file mode 100755
index 0000000..90e1acd
--- a/dev/null
+++ b/cpustat
@@ -0,0 +1,11 @@
+#!/bin/zsh
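+# CPU utilisation derived from mpstat; the <fc=...> markup below is status-bar
+# colouring (presumably xmobar, judging by the tags used).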
+idle=$(mpstat | tail -1 | awk '{ print $12 }')
+print $idle
+util=${$((100.00-$idle))%.*}
+
+if [[ $util -ge 50 ]];then
+ echo -n "CPU: <fc=#f0c040>$util</fc>%"
+else
+ echo -n "CPU: $util%"
+fi
+
diff --git a/current b/current
new file mode 100755
index 0000000..31a02ae
--- a/dev/null
+++ b/current
@@ -0,0 +1,17 @@
+#!/bin/zsh
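+# Offer the entries of ~/.current in dmenu: URLs are opened in firefox,
+# anything else is copied to the X clipboard and, if it resolves to a command,
+# run in the background.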
+setopt shwordsplit
+isurl () {
+ curl -I -s "$1" &>/dev/null || return 1
+ return 0
+ }
+export yprofile="current"
+
+while :;do
+ object=$(tac ~/.current | awk 'NF>0 { print $0 }' | ${DMENU} )
+ [[ -z $object ]] && break
+ object=${(Q)object}
+ if isurl $object;then firefox -new-tab $object; continue ;fi
+ xclip -i <<< "$object"
+ if =$object;then $object &!;fi
+done
diff --git a/dailyshow b/dailyshow
new file mode 100755
index 0000000..c2a0bf5
--- a/dev/null
+++ b/dailyshow
@@ -0,0 +1,74 @@
+#!/bin/bash
+#
+# This script depends on curl and rtmpdump.
+# For playback I recommend mplayer.
+# Suggestions are welcome.
+# Daily show rocks
+
+RTMPDUMP="rtmpdump" # the name of the binary
+
+set -eu
+
+if [ $# = 1 ] && [ ${#1} = 6 ] ; then
+ ID="$1"
+elif [ $# = 1 ] && echo "$1" | grep -E -q "http://media.mtvnservices.com/mgid:cms:[^:]+:comedycentral.com:[0-9]+" ; then
+ ID=`echo "$1" | cut -d : -f 6`
+elif [ $# = 1 ] && echo "$1" | grep -E -q "http://.+" ; then
+ if ! ID=`curl -s "$1" | grep -E -m 1 -o "http://media.mtvnservices.com/mgid:cms:[^:]+:comedycentral.com:[0-9]+" | cut -d : -f 6` ; then
+ echo "error: could not extract video id"
+ exit 1
+ fi
+else
+ BA=`basename "$0"`
+ echo "usage:"
+ echo " $BA \${ID}"
+ echo " $BA http://www.thedailyshow.com/full-episodes/\${ID}/title-of-the-episode"
+ echo " $BA http://www.thedailyshow.com/watch/some-kind-of-date/title-of-video"
+# echo " $BA http://www.comedycentral.com/colbertreport/full-episodes/index.jhtml?episodeId=\${ID}"
+ echo " $BA http://www.colbertnation.com/full-episodes/date-and-title-of-the-episode"
+ echo " $BA http://media.mtvnservices.com/mgid:cms:item:comedycentral.com:\${ID}"
+ echo " $BA http://media.mtvnservices.com/mgid:cms:video:comedycentral.com:\${ID}"
+ echo " $BA http://media.mtvnservices.com/mgid:cms:fullepisode:comedycentral.com:\${ID}"
+ exit 1
+fi
+
+echo "ID = $ID"
+
+RTMPDUMP_OPTS="--swfUrl "http://media.mtvnservices.com/player/release/?v=4.1.2" --swfsize 536258 --swfhash f98296daddbd723bb2f740a6c276535638038b128857c8e4e750664e9e592468 --resume"
+
+GEN_URL="http://media.mtvnservices.com/player/config.jhtml?uri=mgid:cms:item:comedycentral.com:${ID}&group=entertainment&type=error"
+PARTS=`curl -s "$GEN_URL" | grep media:content | grep -v bumper | cut -d \" -f 2`
+
+echo -n "PARTS = " ; echo $PARTS
+
+FILENAMES=""
+
+# download parts in parallel
+for X in $PARTS ; do
+ VIDEO_URL=`curl -s "$X" | grep edgefcs.net | tail -n 1 | cut -d '>' -f 2 | cut -d '<' -f 1 | sed -e s/rtmpe/rtmp/`
+ echo "VIDEO_URL = $VIDEO_URL"
+ FILENAME=`basename "$VIDEO_URL"`
+ $RTMPDUMP $RTMPDUMP_OPTS -o "$FILENAME" -r "$VIDEO_URL" &
+ FILENAMES="$FILENAMES $FILENAME"
+done
+wait
+
+# here is an example of how you can combine the parts into a proper video:
+# NOTE: downloaded files are not actually in mp4 format!
+#
+#mv ds_15001_01_640x360_1300.mp4 ds_15001_01_640x360_1300.flv
+#mv ds_15001_02_640x360_1300.mp4 ds_15001_02_640x360_1300.flv
+#mv ds_15001_03_640x360_1300.mp4 ds_15001_03_640x360_1300.flv
+#mv ds_15001_04_640x360_1300.mp4 ds_15001_04_640x360_1300.flv
+#
+#ffmpeg -acodec copy -vcodec copy -i ds_15001_01_640x360_1300.flv ds_15001_01_640x360_1300.mp4
+#ffmpeg -acodec copy -vcodec copy -i ds_15001_02_640x360_1300.flv ds_15001_02_640x360_1300.mp4
+#ffmpeg -acodec copy -vcodec copy -i ds_15001_03_640x360_1300.flv ds_15001_03_640x360_1300.mp4
+#ffmpeg -acodec copy -vcodec copy -i ds_15001_04_640x360_1300.flv ds_15001_04_640x360_1300.mp4
+#
+#MP4Box -add ds_15001_01_640x360_1300.mp4 -cat ds_15001_02_640x360_1300.mp4 -cat ds_15001_03_640x360_1300.mp4 -cat ds_15001_04_640x360_1300.mp4 -new ds_15001.mp4
+#
+
+echo
+echo "play it with:"
+echo "mplayer -fixed-vo -fs${FILENAMES}"
diff --git a/dbg-trace.sh b/dbg-trace.sh
new file mode 100644
index 0000000..e7b6b19
--- a/dev/null
+++ b/dbg-trace.sh
@@ -0,0 +1,79 @@
+# -*- shell-script -*-
+# This program needs to be SOURCE'd and is not called as an executable
+#
+# Copyright (C) 2008, 2011 Rocky Bernstein <[email protected]>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; see the file COPYING. If not, write to
+# the Free Software Foundation, 59 Temple Place, Suite 330, Boston,
+# MA 02111 USA.
+
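+# Typical use (sketch; assumes the zshdb library files are installed where
+# _Dbg_libdir below points): source this file from the script to be debugged,
+# then call _Dbg_debugger at the point where stepping should begin:
+#   . /usr/local/share/zshdb/dbg-trace.sh
+#   _Dbg_debugger
+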
+typeset _Dbg_trace_old_set_opts
+_Dbg_trace_old_set_opts=$-
+set +u
+
+# Name we refer to ourselves by
+typeset _Dbg_debugger_name='zshdb'
+
+# The shell we are configured to run under.
+typeset _Dbg_shell='/bin/zsh'
+
+# The short shell name. Helps keep code common in bash, zsh, and ksh debuggers.
+# Equivalent to: basename $_Dbg_shell
+typeset _Dbg_shell_name=${_Dbg_shell##*/}
+
+typeset -a _Dbg_script_args=("$@")
+
+# Original $0. Note we can't set this in an include.
+typeset _Dbg_orig_0=$0
+
+# Equivalent to basename $0; the short program name
+typeset _Dbg_pname=${0##*/}
+
+## Stuff set by autoconf/configure ###
+typeset prefix=/usr/local
+typeset _Dbg_libdir=${prefix}/share/zshdb
+###
+
+[[ ! -d $_Dbg_libdir ]] && _Dbg_libdir='.'
+# Parse just the libdir option
+typeset -a libdir
+zparseopts -a libdir -E L: -library:
+if (( ${#libdir} > 0 )) ; then
+ typeset -a lib_opts; eval "lib_opts=($libdir)"
+ if [[ ! -d ${lib_opts[2]} ]] ; then
+ print "${lib_opts[2]} is not a directory"
+ exit 1
+ fi
+ _Dbg_libdir=${lib_opts[2]}
+ unset lib_opts
+fi
+
+# Pull in the rest of the debugger code.
+typeset _Dbg_main="$_Dbg_libdir/dbg-main.sh"
+if [[ ! -r $_Dbg_main ]] ; then
+ print "${_Dbg_pname}: Can't read debugger library file '${_Dbg_main}'."
+ print "${_Dbg_pname}: Perhaps zshdb is installed wrong (if its installed)." >&2
+ print "${_Dbg_pname}: Try running zshdb using -L (with a different directory)." >&2
+ print "${_Dbg_pname}: Run zshdb --help for a list and explanation of options." >&2
+ exit 1
+
+fi
+. ${_Dbg_libdir}/dbg-main.sh
+
+set -${_Dbg_trace_old_set_opts}
+unset _Dbg_trace_old_set_opts
+
+_Dbg_debugger() {
+ trap '_Dbg_trap_handler $? "$@"' DEBUG
+}
diff --git a/diditchange b/diditchange
new file mode 100755
index 0000000..b02fd15
--- a/dev/null
+++ b/diditchange
@@ -0,0 +1,27 @@
+#!/bin/zsh
+#set -x
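+# Track a URL by the md5sum of its content under ~/.local/share/diditchange and
+# announce (via mecho from ~/.zsh/.zsh_functions) when it changes.
+# Example (the URL is illustrative): diditchange http://example.com/news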
+url=$1
+DDIR="$HOME/.local/share/diditchange"
+[[ ! -d $DDIR ]] && mkdir -p $DDIR
+
+file=$(tr '/' '.' <<< ${url##*//})
+tfile=`mktemp`
+#curl -s $website
+#echo $website
+if [[ ! -f ${DDIR}/$file ]];then
+ curl -s $url | md5sum > ${DDIR}/$file
+ ~/bin/diditchange $url
+else
+ source ~/.zsh/.zsh_functions
+ curl -s $url | md5sum >| $tfile
+
+ if ! /usr/bin/diff $tfile ${DDIR}/$file; then
+ cp $tfile ${DDIR}/$file
+ mecho "Site $url has changed"
+ else
+ mecho "No difference !"
+ exit 1
+ fi
+fi
+rm $tfile
+#set +x
diff --git a/dmenu_run b/dmenu_run
new file mode 100755
index 0000000..3313680
--- a/dev/null
+++ b/dmenu_run
@@ -0,0 +1,4 @@
+#!/bin/zsh
+PATH="/home/raghavendra/bin:/opt/wine/bin:/bin:/usr/bin:/sbin:/usr/sbin"
+#exe=`dmenu_path_c | dmenu -i -fa 'xft:Bitstream:pixelsize=14' -nb black -nf white -p : ${1+"$@"}` && exec ${=exe}
+exe=`dmenu_path | yeganesh -f -- -i -fn 'xft:Bitstream:pixelsize=19' -m 1 -sb aquamarine4 -nb grey2 -nf grey50 -p : ` && exec ${=exe}
diff --git a/dmenuclip b/dmenuclip
new file mode 100755
index 0000000..cb6abd1
--- a/dev/null
+++ b/dmenuclip
@@ -0,0 +1,52 @@
+#!/usr/bin/perl
+# dmenuclip - Really nasty dmenu launcher to be used in combination with
+# clipbored
+use strict;
+
+my $xsels = "$ENV{XDG_DATA_HOME}/clipbored/clips";
+
+my $font = (exists $ENV{CLIPBORED_DMENU_FONT})
+ ? $ENV{CLIPBORED_DMENU_FONT}
+ : '-*-fixed-*-*-*-*-15-*-*-*-*-*-*-*';
+my $normal_bg = (exists $ENV{CLIPBORED_DMENU_NORMAL_BG})
+ ? $ENV{CLIPBORED_DMENU_NORMAL_BG}
+ : "#1c1c1c";
+my $normal_fg = (exists $ENV{CLIPBORED_DMENU_NORMAL_FG})
+ ? $ENV{CLIPBORED_DMENU_NORMAL_FG}
+ : "#ffffff";
+my $select_bg = (exists $ENV{CLIPBORED_DMENU_SELECT_BG})
+ ? $ENV{CLIPBORED_DMENU_SELECT_BG}
+ : "#484848";
+my $select_fg = (exists $ENV{CLIPBORED_DMENU_SELECT_FG})
+ ? $ENV{CLIPBORED_DMENU_SELECT_FG}
+ : "#ffffff";
+
+my $list_mode = (exists $ENV{CLIPBORED_DMENU_LISTMODE})
+ ? dmenu_mode($ENV{CLIPBORED_DMENU_LISTMODE})
+ : dmenu_mode('vertical');
+
+# If defined, we want vertical mode
+if(exists($ENV{CLIPBORED_DMENU_LINES})) {
+ $list_mode = "-l $ENV{CLIPBORED_DMENU_LINES}";
+}
+
+sub dmenu_mode {
+ my $what = shift;
+
+ if($what eq 'vertical') {
+ return("-l 15");
+ }
+ else {
+ return("");
+ }
+}
+
+$normal_bg =~ s/^(#)/\\$1/;
+$normal_fg =~ s/^(#)/\\$1/;
+$select_bg =~ s/^(#)/\\$1/;
+$select_fg =~ s/^(#)/\\$1/;
+
+my $dmenu = "dmenu -i -b $list_mode "
+ . "-nb $normal_bg -nf $normal_fg -sb $select_bg -sf $select_fg";
+
+system("printf \"\$(\\tac $xsels|$dmenu -fn $font -p dmenuclip)\"|xclip -i -l 0|xclip -o");
diff --git a/dmenurl b/dmenurl
new file mode 100755
index 0000000..a275a86
--- a/dev/null
+++ b/dmenurl
@@ -0,0 +1,74 @@
+#!/usr/bin/perl
+use strict;
+# dmenurl - pick and choose url's in the xsel file
+
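+# Appearance is configurable through the CLIPBORED_DMENU_* environment
+# variables read below, e.g. (illustrative): CLIPBORED_DMENU_LINES=20 dmenurl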
+my $xsels = "$ENV{XDG_DATA_HOME}/clipbored/clips";
+
+my $font = (exists $ENV{CLIPBORED_DMENU_FONT})
+ ? $ENV{CLIPBORED_DMENU_FONT}
+ : '-*-fixed-*-*-*-*-15-*-*-*-*-*-*-*';
+my $normal_bg = (exists $ENV{CLIPBORED_DMENU_NORMAL_BG})
+ ? $ENV{CLIPBORED_DMENU_NORMAL_BG}
+ : "#1c1c1c";
+my $normal_fg = (exists $ENV{CLIPBORED_DMENU_NORMAL_FG})
+ ? $ENV{CLIPBORED_DMENU_NORMAL_FG}
+ : "#ffffff";
+my $select_bg = (exists $ENV{CLIPBORED_DMENU_SELECT_BG})
+ ? $ENV{CLIPBORED_DMENU_SELECT_BG}
+ : "#484848";
+my $select_fg = (exists $ENV{CLIPBORED_DMENU_SELECT_FG})
+ ? $ENV{CLIPBORED_DMENU_SELECT_FG}
+ : "#ffffff";
+
+
+my $list_mode = (exists $ENV{CLIPBORED_DMENU_LISTMODE})
+ ? dmenu_mode($ENV{CLIPBORED_DMENU_LISTMODE})
+ : dmenu_mode('vertical');
+
+# If defined, we want vertical mode
+if(exists($ENV{CLIPBORED_DMENU_LINES})) {
+ $list_mode = "-l $ENV{CLIPBORED_DMENU_LINES}";
+}
+
+sub dmenu_mode {
+ my $what = shift;
+
+ if($what eq 'vertical') {
+ return("-l 15");
+ }
+ else {
+ return("");
+ }
+}
+
+$normal_bg =~ s/^(#)/\\$1/;
+$normal_fg =~ s/^(#)/\\$1/;
+$select_bg =~ s/^(#)/\\$1/;
+$select_fg =~ s/^(#)/\\$1/;
+
+my $dmenu = "dmenu -i -b $list_mode "
+ . "-nb $normal_bg -nf $normal_fg -sb $select_bg -sf $select_fg";
+
+
+open(my $r_clips, '<', $xsels) or die("Cant open $xsels: $!");
+my @cliplist = <$r_clips>;
+close($r_clips);
+
+my @urls;
+for my $clip(@cliplist) {
+ if($clip =~ m;(https?://([-\w\.]+)+(:\d+)?(/([\w/_\.]*(\?\S+)?)?)?);) {
+ push(@urls, $1);
+ }
+}
+my $urls_to_print = join('\n', reverse(@urls));
+
+chomp(my $xclip = `which xclip`);
+
+do {
+ print STDERR "No xclip found. Please install.\n";
+ exit(1);
+} if(!$xclip);
+
+$xclip = "$xclip -i -l 0|$xclip -o";
+
+system("printf \"$urls_to_print\"|$dmenu -p url|$xclip");
diff --git a/down b/down
new file mode 100755
index 0000000..a91cf60
--- a/dev/null
+++ b/down
@@ -0,0 +1,14 @@
+#!/bin/zsh
+
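+# Quiesce the box before shutdown/suspend: sync, drop non-pacman loop mounts,
+# unmount removables, then tear down any running tmux sessions via ~/bin/tux.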
+failed()
+{
+echo "Failed"
+exit 1
+}
+
+sync
+mount | grep loop | grep -v pacman && sudo umount /mnt*
+unmount-removables || failed
+sleep 2
+pidof tmux && ~/bin/tux killall force
+sleep 1
diff --git a/download b/download
new file mode 100755
index 0000000..baea2d4
--- a/dev/null
+++ b/download
@@ -0,0 +1,18 @@
+#!/bin/zsh
+histfile="$HOME/.download_history"
+if [[ $URL == *.torrent ]]
+then
+ pidof -s rtorrent &>/dev/null || ~/bin/tux tnum rtorrent
+ [[ $1 =~ ^http.* ]] && /usr/bin/wget --content-disposition -q -O ~/.rtorrent/watch/${1##*/} "$1" && echo "$URL ============ $(/usr/bin/aria2c --no-conf -S ~/.rtorrent/watch/${1##*/})===========$(date)" >> $histfile && exit
+ cp $URL ~/.rtorrent/watch/
+ notify-send "Torrent" "Download started for $(/usr/bin/aria2c --no-conf -S $URL | grep Name) "
+ echo "$URL ============ $(/usr/bin/aria2c --no-conf -S $URL | grep Name) ====================$(date)" >> $histfile
+ mv "$URL" ~/.torrents/
+
+else
+ gid=$(aria2rpc addUri "$URL")
+ notify-send "Aria" "Download started for $gid"
+ echo "$gid:$URL >===============< $(date)" >>| $histfile
+fi
+#vim set ts=4 sw=4 foldmethod=marker tw=80 noet:
diff --git a/downloadStats b/downloadStats
new file mode 100755
index 0000000..4b206e7
--- a/dev/null
+++ b/downloadStats
@@ -0,0 +1,15 @@
+#!/bin/zsh
+#setopt shwordsplit
+ariaStats=$(aria2mon | grep GID | perl -lne 'print $1 if /(ETA:.+)/') || ariaStats=""
+torrentStats=$(~/.rtorrent/torrentStats ~/.rtorrent/xmlrpc2scgi.py ~/.rtorrent/rpc.socket 2>/dev/null | grep "Done:" | cut -d " " -f 2 | sed -e 's/^/:/' ) || torrentStats=""
+([[ ! -z $ariaStats ]] || [[ ! -z $torrentStats ]]) && echo "${ariaStats} \\ ${torrentStats}"
+if [[ (! -t 1) && -z $1 ]];then
+ exit 0
+fi
+
+if [[ -z $ariaStats && -z $torrentStats ]];then
+ exit 1
+else
+ exit 0
+fi
+ #set +x
diff --git a/fetch_poster.py b/fetch_poster.py
new file mode 100755
index 0000000..8dec655
--- a/dev/null
+++ b/fetch_poster.py
@@ -0,0 +1,322 @@
+#!/usr/bin/python2
+# -*- coding: utf8 -*-
+"""
+This Python script is intended to find the best possible poster/cover image
+for a video.
+
+Uses the following www-sites for scraping for the poster image (in this
+order):
+
+ movieposter.com
+ imdb.com
+
+Picks the largest (in pixels) vertical poster.
+
+Written by Pekka Jääskeläinen (gmail: pekka.jaaskelainen) 2007
+"""
+
+import urllib
+import re
+import tempfile
+import os
+import optparse
+import sys
+import imdbpy
+
+movie_poster_site = True
+try:
+ import BeautifulSoup
+except ImportError:
+ print """BeautifulSoup class is required for parsing the MoviePoster site.
+
+In Debian/Ubuntu it is packaged as 'python-beautifulsoup'.
+
+http://www.crummy.com/software/BeautifulSoup/#Download/"""
+ movie_poster_site = False
+
+imaging_library = True
+try:
+ import Image
+except ImportError:
+ print """Python Imaging Library is required for figuring out the sizes of
+the fetched poster images.
+
+In Debian/Ubuntu it is packaged as 'python-imaging'.
+
+http://www.pythonware.com/products/pil/"""
+ imaging_library = False
+
+# Default number of IMDb retries
+import time
+defaultretries=3
+
+def functionretry(func, arg1, arg2=None, retries=None):
+ global defaultretries
+
+ if retries == None:
+ retries = defaultretries
+
+ attempts = 0
+ stop = False
+ while (not stop):
+ try:
+ if arg2:
+ result = func(arg1, arg2)
+ else:
+ result = func(arg1)
+ stop = True
+ except:
+ result = None
+ if not stop:
+ attempts += 1
+ if attempts > retries:
+ stop = True
+ if attempts <= retries:
+ print 'Failed to retrieve data, retry in 5s'
+ time.sleep(5)
+
+ if attempts > retries:
+ print 'Error retrieving data : No more attempts'
+ return result
+
+class PosterImage:
+ """
+ Holds a single poster image.
+
+    Contains information about the resolution, the location of the file
+    in the file system, etc.
+ """
+ width = 0
+ height = 0
+ file_name = None
+ def __init__(self, file_name):
+ self.file_name = file_name
+ try:
+ (self.width, self.height) = Image.open(file_name).size
+ except:
+ # The imaging lib is not installed or some other error.
+ # Do not take the size in account.
+ pass
+
+
+ def is_vertical(self):
+ return self.width < self.height
+
+ def pixels(self):
+ return self.width*self.height
+
+class PosterFetcher:
+ """
+ Base class for poster image fetchers.
+ """
+ def fetch(self, title_string, imdb_id = None):
+ """
+ Fetch and download to a local temporary filename movie posters
+ for the given title.
+
+        Return an empty list if no images were found.
+ """
+ pass
+
+ def download_image(self, image_url, extension=None):
+
+ (fid, local_filename) = tempfile.mkstemp(extension)
+ local_file = os.fdopen(fid, "wb")
+ local_file.write(urllib.urlopen(image_url).read())
+ local_file.close()
+ return PosterImage(local_filename)
+
+class MoviePosterPosterFetcher(PosterFetcher):
+ """
+ Fetches poster images from movieposter.com
+ """
+ def fetch(self, title_string, imdb_id = None):
+
+ poster_urls = self.title_search(title_string)
+ results = 0
+ max_results = 4
+ images = []
+
+ if poster_urls:
+ for url in poster_urls:
+ image_url = self.find_poster_image_url(url)
+ if image_url is not None:
+ images.append(self.download_image(image_url, ".jpg"))
+ results += 1
+ if results >= max_results:
+ break
+ return images
+
+ def find_poster_image_url(self, poster_page_url):
+ """
+ Parses the given poster page and returns an URL pointing to the poster
+ image.
+ """
+ #print "Getting",poster_page_url
+
+ soup = BeautifulSoup.BeautifulSoup(urllib.urlopen(poster_page_url))
+
+ imgs = soup.findAll('img', attrs={'src':re.compile('/posters/archive/main/.*')})
+
+ if len(imgs) == 1:
+ return "http://eu.movieposter.com/" + imgs[0]['src']
+ return None
+
+
+ def title_search(self, title_string):
+ """
+ Executes a title search on movieposter.com.
+
+ Returns a list of URLs leading to the page for the poster
+ for the given title_string.
+ """
+ params = urllib.urlencode(\
+ {'ti': title_string.encode("ascii", 'replace'),
+ 'pl': 'action',
+ 'th': 'y',
+ 'rs': '12',
+ 'size': 'any'})
+ opener = urllib.URLopener()
+ (filename, headers) = \
+ opener.retrieve("http://eu.movieposter.com/cgi-bin/mpw8/search.pl",
+ data=params)
+
+ f = open(filename, 'r')
+ results = f.read()
+ f.close()
+
+ return self.parse_title_search_results(results, title_string)
+
+ def parse_title_search_results(self, result_page, title_string):
+ """
+ Parses the result page of a title search on movieposter.com.
+
+        Returns a list of URLs leading to a page with a poster for the given title.
+ """
+ search = title_string.lower()
+ soup = BeautifulSoup.BeautifulSoup(result_page)
+ divs = soup.findAll('div', attrs={'class':'pid'})
+ urls = []
+ for div in divs:
+ links = div.findAll('a')
+
+ if len(links) > 0:
+ for link in links:
+ # Skip the mailto links.
+ spl = link['href'].split(":")
+ if len(spl) > 1:
+ if spl[0].lower() == "mailto":
+ continue
+ title = link['title'].lower()
+ if title.endswith("poster"):
+ title = title[0:-len(" poster")]
+ if title == search:
+ urls.append(link['href'])
+ return urls
+
+
+class IMDbPosterFetcher(PosterFetcher):
+ """
+ Fetches poster images from imdb.com.
+ """
+ def fetch(self, title_string, imdb_id = None):
+
+ if imdb_id is None:
+ return []
+ poster_url = imdbpy.find_poster_url(imdb_id)
+ if poster_url is not None:
+ filename = poster_url.split("/")[-1]
+ (name, extension) = os.path.splitext(filename)
+ return [self.download_image(poster_url, extension)]
+ return []
+
+def find_best_posters(title, count=1, accept_horizontal=False, imdb_id=None, retries=None):
+
+ fetchers = [MoviePosterPosterFetcher(), IMDbPosterFetcher()]
+ #fetchers = [IMDbPosterFetcher()]
+ posters = []
+
+ # If it's a series title 'Sopranos, S06E14' then use just the series
+ # name for finding the poster. Strip the episode number.
+ (series_title, season, episode) = imdbpy.detect_series_title(title)
+ if series_title is not None and season is not None and episode is not None:
+ title = series_title.strip()
+ if title.endswith(","):
+ title = title[0:-1]
+
+    # Drop 'The', 'A', etc.
+ preps = ["the", "a" , "an", "die", "der"]
+ for prep in preps:
+ if title.lower().startswith(prep + " "):
+ title = title[len(prep + " "):]
+ break
+
+ for fetcher in fetchers:
+ new_posters = functionretry(fetcher.fetch, title, arg2=imdb_id, retries=retries)
+ if new_posters:
+ for poster in new_posters:
+ if not accept_horizontal and not poster.is_vertical():
+ os.remove(poster.file_name)
+ continue
+ posters.append(poster)
+
+ def size_cmp(a, b):
+ return cmp(a.pixels(), b.pixels())
+
+ posters.sort(size_cmp)
+ posters.reverse()
+
+ for small_poster in posters[count:]:
+ os.remove(small_poster.file_name)
+
+ return posters[0:count]
+
+def main():
+ global defaultretries
+
+ p = optparse.OptionParser()
+ p.add_option('--number', '-n', action="store", type="int", default=1,
+ help="the count of biggest posters to get")
+ p.add_option('--all', '-a', action="store_true", default=False,
+ help="accept all posters, even horizontal ones")
+ p.add_option('--poster_search', '-P', metavar='IMDB_ID', default=None, dest="imdb_id",
+                 help="Displays a list of URLs to movie posters. The lines are "\
+ "ranked by descending value. For MythVideo.")
+ p.add_option('--retry', '-t', action="store", type="int", dest="retries",default=3,
+ help="Number of retries, 0 means no retry [default 3]")
+
+ options, arguments = p.parse_args()
+
+ defaultretries = options.retries
+
+ title = ""
+ if len(arguments) != 1:
+ if options.imdb_id:
+ # TODO: Fetch the title from IMDb.
+ metadata = functionretry(imdbpy.metadata_search,options.imdb_id)
+ if metadata:
+ title = imdbpy.parse_meta(metadata, "Title")
+ else:
+            print "Error: can't retrieve title from IMDb"
+ sys.exit(1)
+ else:
+ print "Please give a video title as argument."
+ sys.exit(1)
+ else:
+ title = arguments[0]
+
+ posters = find_best_posters(title, options.number, options.all,
+ imdb_id=options.imdb_id, retries=defaultretries)
+
+ if options.imdb_id is not None:
+ for poster in posters:
+ print "%s" % poster.file_name
+ else:
+ for poster in posters:
+ print "%s [%dx%d] vertical: %s " % \
+ (poster.file_name, poster.width,
+ poster.height, poster.is_vertical())
+
+if __name__ == '__main__':
+ main()
+
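fetch_poster.py's docstring promises the largest vertical poster: find_best_posters() discards horizontal candidates (unless --all is given) and sorts the survivors by pixel count, biggest first, keeping --number of them. A toy restatement of that ordering, with invented file names and sizes purely for illustration:

    # Same rule as find_best_posters(): keep vertical images, largest pixel count first.
    candidates = [
        ("a.jpg", 300, 450),   # vertical, 135000 px
        ("b.jpg", 600, 400),   # horizontal -> discarded
        ("c.jpg", 500, 750),   # vertical, 375000 px
    ]
    vertical = [c for c in candidates if c[1] < c[2]]            # width < height
    best_first = sorted(vertical, key=lambda c: c[1] * c[2], reverse=True)
    print([name for name, w, h in best_first])                   # ['c.jpg', 'a.jpg']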
diff --git a/fincore b/fincore
new file mode 100755
index 0000000..167186b
--- a/dev/null
+++ b/fincore
@@ -0,0 +1,347 @@
+#! /usr/bin/perl
+
+# fincore - File IN CORE: show which blocks of a file are in core
+# Copyright (C) 2007 Dave Plonka
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+# $Id: fincore,v 1.9 2007/05/23 21:17:52 plonka Exp $
+# Dave Plonka, Apr 5 2007
+
+use Inline C;
+use strict;
+use FindBin;
+use Getopt::Long;
+use Pod::Usage;
+use POSIX; # for sysconf
+
+my %opt;
+
+# { CONFIGURATION SECTION BEGIN ################################################
+
+$opt{s} = 0;
+
+# } CONFIGURATION SECTION END ##################################################
+
+GetOptions('help' => \$opt{h},
+ 'man' => \$opt{m},
+ 'summary!' => \$opt{s},
+ 'justsummarize!' => \$opt{S},
+ 'stdin' => \$opt{I}) or pod2usage(2);
+
+pod2usage(0) if ($opt{h});
+pod2usage(-exitstatus => 0, -verbose => 2) if $opt{m};
+
+pod2usage(2) if (0 == @ARGV and !$opt{I});
+
+if ($opt{S}) {
+ $opt{s} = 1;
+}
+
+my @files;
+if ($opt{I}) {
+ @files = grep { chomp } <STDIN>;
+} else {
+ @files = @ARGV;
+}
+
+my $pageSize = POSIX::sysconf(&POSIX::_SC_PAGESIZE);
+print "page size: $pageSize bytes\n" if $opt{s};
+
+my $filesProcessed = 0;
+my $totalPages = 0;
+foreach my $file (@files) {
+ if (!stat($file)) {
+ warn("$file: $!\n");
+ next;
+ }
+ my @values = fincore($file);
+ if (@values) {
+ $totalPages += @values;
+ printf("%s: %u incore page%s: @values\n",
+ $file, scalar(@values), (1 == @values)? "" : "s") unless $opt{S};
+ } else {
+ print "$file: no incore pages.\n" unless $opt{S};
+ }
+ $filesProcessed++;
+}
+
+if ($opt{s}) {
+ if ($filesProcessed) {
+ printf("%.0f page%s, %sbytes in core for %u file%s; " .
+ "%.2f page%s, %sbytes per file.\n",
+ $totalPages, (1 == $totalPages)? "" : "s",
+ scale("%.1f", $totalPages*$pageSize),
+ $filesProcessed, (1 == $filesProcessed)? "" : "s",
+ $totalPages/$filesProcessed,
+ (1. == $totalPages/$filesProcessed)? "" : "s",
+ scale("%.1f", ($totalPages*$pageSize)/$filesProcessed));
+ }
+}
+
+exit;
+
+################################################################################
+
+sub scale($$) { # This is based somewhat on Tobi Oetiker's code in rrd_graph.c:
+ my $fmt = shift;
+ my $value = shift;
+  my @symbols = ("a", # 10^-18 Atto
+                 "f", # 10^-15 Femto
+                 "p", # 10^-12 Pico
+                 "n", # 10^-9  Nano
+                 "u", # 10^-6  Micro
+                 "m", # 10^-3  Milli
+                 " ", # Base
+                 "k", # 10^3   Kilo
+                 "M", # 10^6   Mega
+                 "G", # 10^9   Giga
+                 "T", # 10^12  Tera
+                 "P", # 10^15  Peta
+                 "E");# 10^18  Exa
+
+ my $symbcenter = 6;
+ my $digits = (0 == $value)? 0 : floor(log($value)/log(1024));
+ return sprintf(${fmt} . " %s", $value/pow(1024, $digits),
+ $symbols[$symbcenter+$digits])
+}
+
+################################################################################
+
+__END__
+
+=head1 NAME
+
+fincore - File IN CORE: show which blocks of a file are in core
+
+=head1 SYNOPSIS
+
+fincore [options] <-stdin | file [...]>
+
+ Options:
+ -help - brief help message
+ -man - full documentation
+ -summary - report summary statistics for the files
+ -justsummarize - just report summary statistics for the files
+ -stdin - read file names from standard input
+
+=head1 OPTIONS
+
+=over 8
+
+=item B<-help>
+
+Shows usage information and exits.
+
+=item B<-man>
+
+Shows the manual page and exits.
+
+=item B<-summary>
+
+Report summary statistics for the files.
+
+=item B<-nosummary>
+
+Don't report summary statistics for the files.
+This is the default.
+
+=item B<-justsummarize>
+
+Just report summary statistics for the files.
+I.e. don't show details for each file.
+
+=item B<-nojustsummarize>
+
+Don't just report summary statistics for the files.
+This is the default.
+
+=item B<-stdin>
+
+Read file names from standard input.
+This is to avoid "Arg list too long" with very many files.
+
+=back
+
+=head1 DESCRIPTION
+
+B<fincore> is a command that shows which pages (blocks) of a file are
+in core memory.
+
+It is particularly useful for determining the contents of the
+buffer-cache. The name means "File IN CORE" and I pronounce it
+"eff in core".
+
+=head1 EXAMPLES
+
+ $ fincore foo.rrd
+ foo.rrd: no incore pages.
+
+ $ cat foo.rrd >/dev/null # read the whole file
+ $ fincore foo.rrd
+ foo.rrd: 26 incore pages: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25
+
+ $ ls |grep '\.rrd$' |~/perl/fincore --stdin --justsummarize
+ page size: 4096 bytes
+ 2214049 pages, 8.4 Gbytes in core for 268994 files; 8.23 pages, 32.9 kbytes per file.
+
+=head1 BUGS
+
+In verbose mode, you may get an error from mincore such as "cannot
+allocate memory" if the file size is zero.
+
+Some operating systems have posix_fadvise, but it doesn't work.
+For instance under Linux 2.4, you may see this error:
+
+ posix_fadvise: Inappropriate ioctl for device
+
+=head1 AUTHOR
+
+Dave Plonka <[email protected]>
+
+Copyright (C) 2007 Dave Plonka.
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+=head1 VERSION
+
+This is fincore B<$Revision: 1.9 $>.
+
+=head1 SEE ALSO
+
+The B<fadvise> command.
+
+=cut
+
+__C__
+#define PERL_INLINE /* undef this to build the C code stand-alone */
+
+/* { POSIX stuff */
+#include <errno.h> /* errno */
+#include <fcntl.h> /* fcntl, open */
+#include <stdio.h> /* perror, fprintf, stderr, printf */
+#include <stdlib.h> /* exit, calloc, free */
+#include <string.h> /* strerror */
+#include <sys/stat.h> /* stat, fstat */
+#include <sys/types.h> /* size_t */
+#include <unistd.h> /* sysconf, close */
+/* } */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+/* fincore -
+ */
+void
+fincore(char *filename) {
+ int fd;
+ struct stat st;
+ void *pa = (char *)0;
+ char *vec = (char *)0;
+ register size_t n = 0;
+ size_t pageSize = getpagesize();
+ register size_t pageIndex;
+# ifdef PERL_INLINE
+ INLINE_STACK_VARS;
+# endif
+
+# ifdef PERL_INLINE
+ INLINE_STACK_RESET;
+# endif
+
+ fd = open(filename, 0);
+ if (0 > fd) {
+ perror("open");
+# ifdef PERL_INLINE
+ INLINE_STACK_VOID;
+# endif
+ return;
+ }
+
+ if (0 != fstat(fd, &st)) {
+ perror("fstat");
+ close(fd);
+# ifdef PERL_INLINE
+ INLINE_STACK_VOID;
+# endif
+ return;
+ }
+
+ pa = mmap((void *)0, st.st_size, PROT_NONE, MAP_SHARED, fd, 0);
+ if (MAP_FAILED == pa) {
+ perror("mmap");
+ close(fd);
+# ifdef PERL_INLINE
+ INLINE_STACK_VOID;
+# endif
+ return;
+ }
+
+ /* vec = calloc(1, 1+st.st_size/pageSize); */
+ vec = calloc(1, (st.st_size+pageSize-1)/pageSize);
+ if ((void *)0 == vec) {
+ perror("calloc");
+ close(fd);
+# ifdef PERL_INLINE
+ INLINE_STACK_VOID;
+# endif
+ return;
+ }
+
+ if (0 != mincore(pa, st.st_size, vec)) {
+ /* perror("mincore"); */
+ fprintf(stderr, "mincore(%p, %lu, %p): %s\n",
+ pa, (unsigned long)st.st_size, vec, strerror(errno));
+ free(vec);
+ close(fd);
+# ifdef PERL_INLINE
+ INLINE_STACK_VOID;
+# endif
+ return;
+ }
+
+ /* handle the results */
+  /* mincore filled ceil(size/pageSize) entries; do not read past them */
+  for (pageIndex = 0; pageIndex < (st.st_size+pageSize-1)/pageSize; pageIndex++) {
+ if (vec[pageIndex]&1) {
+# ifndef PERL_INLINE /* { */
+ printf("%lu\n", (unsigned long)pageIndex);
+# else /* }{ */
+ /* return the results on perl's stack */
+ INLINE_STACK_PUSH(sv_2mortal(newSVnv(pageIndex)));
+ n++;
+# endif /* } */
+ }
+ }
+
+ free(vec);
+ vec = (char *)0;
+
+ munmap(pa, st.st_size);
+ close(fd);
+
+# ifdef PERL_INLINE
+ INLINE_STACK_DONE;
+# endif
+
+# ifdef PERL_INLINE
+ INLINE_STACK_RETURN(n);
+# endif
+ return;
+}
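fincore's summary line compresses byte counts with the 1024-based prefixes computed in its scale() sub. A small Python equivalent of that formula, shown here only as a cross-check and fed the page total from the EXAMPLES section of the POD:

    import math

    SYMBOLS = ["a", "f", "p", "n", "u", "m", " ", "k", "M", "G", "T", "P", "E"]
    CENTER = 6   # index of the unprefixed base symbol

    def scale(value):
        """Pick a 1024-based prefix for value, mirroring fincore's scale() sub."""
        digits = 0 if value == 0 else int(math.floor(math.log(value, 1024)))
        return "%.1f %s" % (value / 1024.0 ** digits, SYMBOLS[CENTER + digits])

    print(scale(2214049 * 4096))   # '8.4 G' -- the "8.4 Gbytes in core" from the example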
diff --git a/flplay b/flplay
new file mode 100755
index 0000000..da33d22
--- a/dev/null
+++ b/flplay
@@ -0,0 +1,12 @@
+#!/bin/zsh
+fpid=$(pidof plugin-container)
+
+fd=$(lsof | grep $fpid | /bin/grep '(deleted)' | /bin/grep FlashX | /bin/grep -o '[0-9]*u ' | head -1)
+
+fd=$fd[1,-3]
+
+print /proc/$fpid/fd/$fd
+
+[[ -n $1 ]] && exit
+
+mplayer /proc/$fpid/fd/$fd
diff --git a/forward b/forward
new file mode 100755
index 0000000..a1c6a91
--- a/dev/null
+++ b/forward
@@ -0,0 +1,16 @@
+#!/bin/zsh
+set -x
+
+failed(){
+echo "Failed!"
+exit 1
+}
+
+#PORT=$(( (RANDOM+2000)%65536 ))
+PORT=14770
+ps auxww | =grep -i wnohang | =grep -i ssh | =grep -v grep || lsof -i:$PORT || ssh -f -i ~/.ssh/authorized_keys -p 8193 -ND $PORT [email protected] || failed
+#export HTTP_PROXY="127.0.0.1:$PORT"
+tsocks luakit http://lwn.net
+
+
+set +x
diff --git a/functions b/functions
new file mode 100644
index 0000000..067019a
--- a/dev/null
+++ b/functions
@@ -0,0 +1,610 @@
+pronounce(){
+word="${@:-$(xclip -o)}"
+if [[ ! -f $XDG_CACHE_HOME/pronounce/$word ]];then
+ wget -qO- $(wget -qO- "http://dictionary.reference.com/browse/$word" | grep 'soundUrl' | head -n 1 | sed 's|.*soundUrl=\([^&]*\)&.*|\1|' | sed 's/%3A/:/g;s/%2F/\//g') | tee $XDG_CACHE_HOME/pronounce/$word | /usr/bin/mplayer -really-quiet -cache 8192 -
+else
+ /usr/bin/mplayer $XDG_CACHE_HOME/pronounce/$word
+fi
+}
+
+function translate(){
+wget -qO - "http://ajax.googleapis.com/ajax/services/language/translate?langpair=|en&v=1.0&q=`xsel`" |cut -d \" -f 6
+}
+
+
+function clik(){
+ scrot -s '%Y-%m-%d-%s_$wx$h.png' -e 'mv $f ~/.scrshots/; notify-send "Scrot" "$f done!"'
+}
+
+function mpl()
+{
+$HOME/bin/mplayer "$@"
+}
+
+function etym(){
+/usr/bin/w3m "http://www.etymonline.com/index.php?search=$word&searchmode=none"
+}
+
+function getmovie (){
+
+    /usr/bin/get_movie.py `/usr/bin/search_movie.py "$@" | head -3 | tail -1 | cut -d ":" -f 2`
+
+}
+
+function cnkt(){
+export TERM=xterm
+dtach -A $HOME/.cache/1080 -E ssh [email protected] -L 1080:socks.yahoo.com:1080
+#tmux new-session -s cnkt ssh [email protected] -L 1080:socks.yahoo.com:1080
+}
+
+function randbash(){
+curl -s http://bash.org/?random1|grep -oE "<p class=\"quote\">.*</p>.*</p>"|grep -oE "<p class=\"qt.*?</p>"|sed -e 's/<\/p>/\n/g' -e 's/<p class=\"qt\">//g' -e 's/<p class=\"qt\">//g'|perl -ne 'use HTML::Entities;print decode_entities($_),"\n"'| head -1
+}
+
+
+function ifind(){
+find . -iname "*$@*"
+}
+
+mountole(){
+
+ keyctl list @u | grep -q b8d || ecryptfs-add-passphrase #&>/dev/null
+ keyctl list @u | grep -q b8d && mount | grep yie | grep -v grep || mount -i /home/raghavendra/wormole
+ #sudo sed -i -e '/.*ecryptfs.*/s|^/\(.*\)$|#&|g' /etc/fstab 1>/dev/null
+ if [[ $? = "0" ]];then
+ echo "Done!"
+ else
+ echo "Failed :( "
+ return 1
+ fi
+ ssh-expect
+}
+
+
+geoip () { curl -s "http://www.geoiptool.com/?IP=$1" | html2text | egrep --color 'City:|IP Address:|Country:'; }
+
+bbcradio() { local s;echo "Select a station:";select s in 1 1x 2 3 4 5 6 7 "Asian Network an" "Nations & Local lcl";do break;done;s=($s);/usr/bin/mplayer -playlist "http://www.bbc.co.uk/radio/listen/live/r"${s[@]: -1}".asx";}
+
+
+function def(){
+ wn $1 -over
+ if [ $? != "0" ];then echo $1 >> ~/wordlist;fi
+}
+
+
+function igrep(){
+FILENAME=$1
+PATTERN=$2
+find . -iname "$FILENAME" -exec grep -il "$PATTERN" {} \;
+}
+
+function lookup(){
+links -dump http://wordnet.princeton.edu/perl/webwn?s=$1 | grep -i --word-regexp --color=always -B 2 -A 5 $1 | sed '$d'
+}
+
+
+function stemp() {
+echo -n `acpi -t | awk -F ',' '{ print $2 }' | cut -d " " -f 2`
+echo " `nvidia-smi -a |& grep Temp | cut -c17-19`"
+echo `hddtemp /dev/sda`
+ }
+
+
+
+function rcstart(){
+for d in $@;do
+ if [[ -f /var/run/daemons/$d ]];then
+ sudo /etc/rc.d/$d stop; sleep 2; sudo /etc/rc.d/$d start
+ else
+ sudo /etc/rc.d/$d start
+ fi
+done
+}
+
+function pkgsearch(){
+#set -x
+if [[ $# -lt 1 ]];then
+ echo "pkgsearch term pkglist"
+ return 1
+fi
+term="$1"
+shift
+for i in "$@"   # remaining arguments are the packages to search
+do
+ for j in `pacman -Ql $i | cut -d " " -f 2`
+ do
+ grep -l -i $term $j
+ done
+done
+#set +x
+}
+
+function cscore()
+{
+ curl -s -e "http://www.google.com" -A "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.1) Gecko/20061204 Firefox/2.0.0.1" "http://livechat.rediff.com:80/sports/score/score.txt"
+}
+
+function wpack(){
+pacman -Q | grep "$@"
+}
+
+function max(){
+~/bin/mplayeraux "$@"
+}
+
+function rsize() {
+wget --spider "$@" -o /tmp/wget_tmp; grep -i Length /tmp/wget_tmp; rm /tmp/wget_tmp
+}
+
+function buildabs()
+{
+ ABDIR="$HOME/Arch/repo"
+    abpath=$(find -O3 $ABDIR -maxdepth 2 -name "$@" -type d | head -1)
+ [[ -z $abpath ]] && return 1
+ cp -R $abpath /dev/shm/ || return 1
+ pushd /dev/shm/$1/repos/*-x86_64
+ #clear
+}
+
+function vpdf(){
+pdftohtml -q -stdout -noframes "$@" | /usr/bin/w3m -T text/html
+}
+
+function view()
+{
+if [[ -z "$@" ]];then
+ ranger ~/Documents
+elif [[ $1 == http:* ]];then
+ url=${1#*=} # for urls like http://docs.google.com/viewer?url=<pdfurl>
+ dest="$HOME/Documents/${1##*/}"
+ wget -O - -q $url > $dest
+ detach $VIEWER -r 143 $dest
+else
+    detach $VIEWER -r 143 "$@"
+fi
+}
+
+function rcstop(){
+for d in $@;do
+ sudo /etc/rc.d/$d stop
+done
+}
+gdb_get_backtrace() {
+ local exe=$1
+ local core=$2
+
+ gdb ${exe} \
+ --core ${core} \
+ --batch \
+ --quiet \
+ -ex "thread apply all bt full" \
+ -ex "quit"
+}
+
+function {sprunge,spaste}(){
+ echo "sure y/N"
+ read -q || return 1
+ if [[ -z $1 ]];then
+ if [[ -t 0 ]];then
+ url=$(xclip -o | curl -s -F 'sprunge=<-' http://sprunge.us)
+ else
+ url=$(curl -s -F 'sprunge=<-' http://sprunge.us)
+ fi
+ echo $url
+ else
+        for f in $@;do
+ url=$(curl -s -F 'sprunge=<-' http://sprunge.us < $f)
+ echo $url
+ done
+ fi
+ echo -n $url | xclip -i
+}
+
+
+
+function whoru() { echo "Path:"`which $@`; which =$1 | xargs -i pacman -Qo {} ; }
+
+function fdz() { ls -lth | egrep "[0-9](M|G)" | sort -k 5 -n | cut -d " " -f 5,6-; }
+
+function dz() { find . -maxdepth $1 -type d -exec du {} \; | sort -n | awk '{ print $2,"\t",$1/1024,"MB" }' | uniq; }
+function psp() { ps auxww | /bin/grep -i "$@"; }
+function svim() { sudo vim "$@";}
+
+
+
+define() {
+ local LNG=$(echo $LANG | cut -d '_' -f 1)
+ local CHARSET=$(echo $LANG | cut -d '.' -f 2)
+ lynx -accept_all_cookies -dump -hiddenlinks=ignore -nonumbers -assume_charset="$CHARSET" -display_charset="$CHARSET" "http://www.google.com/search?hl=${LNG}&q=define%3A+${1}&btnG=Google+Search" | grep -m 5 -C 2 -A 5 -w "*" > /tmp/define
+
+ if [ ! -s /tmp/define ]; then
+ echo "Sorry, google doesn't know this one..."
+ rm -f /tmp/define
+ return 1
+ else
+ #mecho "$(cat /tmp/define | grep -v Search )" | colorit $1
+ echo "$(cat /tmp/define | grep -v Search )" | colorit $1
+ echo ""
+ fi
+ rm -f /tmp/define
+ echo ">=========================>"
+ aiksaurus $1
+ echo $1 >> ~/wordlist
+ return 0
+}
+
+
+function myip(){
+upnpc -s | grep External | cut -d "=" -f 2
+}
+
+webman(){
+ manum=$1
+ term=${2:-$1}
+ if [[ -z $2 ]];then
+ output=(${(@)$(curl -s -o /dev/null --write-out \
+ "%{url_effective}:%{http_code}\n" \
+ "http://linux.die.net/man/[1-9]/$term")}) && \
+ url=$output[(r)*200]
+ url=${url%:*}
+ manum=$url[(ws:/:)-2]
+ else
+ output=$(curl -s -o /dev/null --write-out \
+ "%{url_effective}:%{http_code}\n" \
+ "http://linux.die.net/man/$manum/$term")
+ url="http://linux.die.net/man/$manum/$term"
+
+ fi
+ [[ -z $url ]] && return
+ /usr/bin/w3m $url
+ /usr/bin/w3m -dump -T text $url > ~/.helpers/man/man$manum/$term.$manum
+}
+
+man(){
+    if [[ ! -z $2 || -z $TMUX ]];then /usr/bin/man "$@" ; return; fi
+    /usr/bin/man -w $@ &>/dev/null || return $?
+    tmux split-window -p 83 "LC_ALL=C LANG=C exec /usr/bin/man $@"
+}
+
+
+append(){
+ echo "Enter stuff"
+ cat >>$1
+}
+
+
+sendmail() {
+ #"msmtp -C $HOME/.msmtprc"
+ subject="$1"
+ mailto=$2
+ # It will ask for body -- much better
+    /usr/bin/mutt -s "$subject" $mailto # < "$@"
+}
+
+
+
+tmpwrite(){
+    file=$(mktemp)
+ /usr/bin/vim $file
+ mv $file $file.d
+}
+
+apod(){
+ DATE=`date +%s`
+ FILE="$HOME/.cache/apod/apod:$DATE.jpg"
+ apodTemp=$(mktemp)
+ curl -s http://apod.nasa.gov/apod/ >| $apodTemp
+ apodDesc=$(perl -lne 'print $_ if /Explanation:/../Tomorrow/' $apodTemp | /usr/bin/w3m -dump -T text/html | head -n -1)
+ url="$(grep IMG $apodTemp | cut -d "\"" -f 2)"
+ wget -q -O "$FILE" -c http://apod.nasa.gov/apod/${=url}
+ /usr/bin/perlbin/vendor/exiftool -overwrite_original -UserComment="$apodDesc" $FILE
+ feh $FILE &
+ echo $apodDesc | par f | gxmessage -file - #-timeout 30
+ echo "Set as wallpaper: y/n"
+ read -q && set_wall $FILE
+}
+
+server(){
+if [[ $1 == start ]];then
+ python -m SimpleHTTPServer 9914 &>~/logs/server.log &
+else
+ $TBROWSER http://localhost:9914/
+fi
+}
+
+loopit(){
+ iso="$1"
+ [[ -z $iso ]] && return 1
+ mntpt=""
+ for x in /mnt*;do
+ if ! mountpoint -q $x;then
+ mntpt=$x
+ break
+ fi
+ done
+ if sudo mount -o loop $iso $mntpt;then
+ echo $mntpt
+ else
+ echo "Failed $mntpt"
+ return 1
+ fi
+}
+
+isurl(){
+ curl -I -s "$url" &>/dev/null || return 1
+ return 0
+}
+
+mecho(){
+ #detect if being invoked through dmenu or terminal - use zenity or stdout
+ #based on that
+ if [[ -t 1 ]];then
+ output="cat"
+ else
+ output="zenity --text-info --height 400 --width 680"
+ fi
+    echo "$@" | ${=output}
+}
+
+
+task()
+{
+if [[ -z $1 || $1 == "add" ]];then
+ echo "Name"
+ read name
+ file="$HOME/.tasks/${name// /-}"
+ #touch $file
+ echo "Description:"
+ read desc
+ echo "$desc" >! "${=file}.remain"
+else
+ echo "Task list"
+ counter=1
+ for i in $(print ~/.tasks/*.remain);do
+ echo -e "\n\nTask #$counter: ${=i}"
+ echo "Description:"
+ cat "${=i}"
+ echo -e "\n\n"
+ echo "Is this task done?(y/N)"
+ read -q && mv ${=i} ${=i:r}.done
+ counter=$(($counter+1))
+ done
+fi
+
+}
+
+comcache(){
+export PATH="/usr/lib/ccache/bin/:$PATH"
+}
+
+ad2que(){
+ trap 'break' INT
+ local tset
+ local input
+ while :;do
+        read input
+ if ! which =$input[(w)1] &>/dev/null;then
+ input="shellrun $input"
+ fi
+ tset="$tset
+ $input"
+ done
+ echo "$tset" >> ~/.dque
+}
+
+
+xpaste(){
+ file="$1"
+ [[ -z $file ]] && file="$HOME/note"
+ var="$(xsel -o)"
+ echo -e "${(qq)var}" >>| ~/.current >>| $file
+ notify-send "Added note" "${(qq)var}"
+}
+
+wiki() { w=$(dig +short txt "$@".wp.dg.cx); echo "$w" }
+
+
+
+dfilter(){
+ # Should return array
+    dchoice=$( eval $@ | awk '{ print NR,$0 }' | ${=DMENU} | awk '{ print $2 }')
+ echo "$dchoice"
+
+}
+
+mangrep(){
+ /usr/bin/man "$1" | grep -5 "$2"
+}
+
+
+help(){ run-help "$@" || less -p "$@" ~/.zshguide.txt }
+
+disassemble() {
+ emulate -L zsh
+ gcc -pipe -S -o - -O -g $* | as -aldh -o /dev/null
+ }
+
+profile() {
+    ZSH_PROFILE_RC=1 $SHELL "$@"
+}
+
+
+xev(){
+ =xev -id $(xdotool getactivewindow) | sed -ne '/^KeyPress/,/^$/p'
+}
+
+
+rsudo(){
+echo "[email protected]" | sudo tee
+}
+
+search(){
+    surfraw google "$@"
+}
+
+ins(){
+ if [[ $1 == *xz || $1 == *pkg.tar ]];then
+ sudo pacman -U $1
+ else
+        sudo pacman -S "$@"
+ fi
+}
+
+
+extract() {
+ if [[ -z "$1" ]] ; then
+ print -P "usage: \e[1;36mextract\e[1;0m < filename >"
+ print -P " Extract the file specified based on the extension"
+ elif [[ -f $1 ]] ; then
+ case ${(L)1} in
+ *.tar.bz2) tar -jxvf $1 ;;
+ *.tar.xz|*.xz) tar -xvf $1 ;;
+ *.tar.gz) tar -zxvf $1 ;;
+ *.bz2) bunzip2 $1 ;;
+ *.gz) gunzip $1 ;;
+ *.jar) unzip $1 ;;
+ *.rar) unrar x $1 ;;
+ *.tar) tar -xvf $1 ;;
+ *.tbz2) tar -jxvf $1 ;;
+ *.tgz) tar -zxvf $1 ;;
+ *.zip) unzip $1 ;;
+ *.Z) uncompress $1 ;;
+ *) echo "Unable to extract '$1' :: Unknown extension"
+ esac
+ else
+ echo "File ('$1') does not exist!"
+ fi
+
+}
+
+pacnews()
+{
+ for n in /etc/**/*.pacnew; do
+ if [[ -r ${n%.*} ]]; then
+ sudo vimdiff $n ${n%.*} &&
+ echo "Replace?"; read -q && sudo mv $n ${n%.*}
+ fi
+ done
+}
+
+dlink(){
+    curl -s "$@" | /usr/bin/w3m -dump -T text/html
+}
+
+H-Glob() {
+ echo -e "
+ / directories
+ . plain files
+ @ symbolic links
+ = sockets
+ p named pipes (FIFOs)
+ * executable plain files (0100)
+ % device files (character or block special)
+ %b block special files
+ %c character special files
+ r owner-readable files (0400)
+ w owner-writable files (0200)
+ x owner-executable files (0100)
+ A group-readable files (0040)
+ I group-writable files (0020)
+ E group-executable files (0010)
+ R world-readable files (0004)
+ W world-writable files (0002)
+ X world-executable files (0001)
+ s setuid files (04000)
+ S setgid files (02000)
+ t files with the sticky bit (01000)
+
+ print *(m-1) # Files modified up to a day ago
+ print *(a1) # Files accessed a day ago
+ print *(@) # Just symlinks
+ print *(Lk+50) # Files bigger than 50 kilobytes
+ print *(Lk-50) # Files smaller than 50 kilobytes
+ print **/*.c # All *.c files recursively starting in \$PWD
+ print **/*.c~file.c # Same as above, but excluding 'file.c'
+ print (foo|bar).* # Files starting with 'foo' or 'bar'
+ print *~*.* # All Files that do not contain a dot
+ chmod 644 *(.^x) # make all plain non-executable files publically readable
+ print -l *(.c|.h) # Lists *.c and *.h
+ print **/*(g:users:) # Recursively match all files that are owned by group 'users'
+ echo /proc/*/cwd(:h:t:s/self//) # Analogous to >ps ax | awk '{print $1}'<"
+}
+#alias help-zshglob=H-Glob
+function lsnew () {
+ if [[ $1 = "" ]]; then
+ lsnew_glob="*"
+ else
+ lsnew_glob=$1
+ fi
+ ls -tr -dl $~lsnew_glob(om[1,30])
+}
+alias lsnew='noglob lsnew'
+
+
+
+#function {news,mutt,ncm,vim,weechat,weechat-curses,newsbeuter,ncmpcpp,atop,rtorrent,torrent} {
+function {news,mutt,ncm,weechat,weechat-curses,newsbeuter,ncmpcpp,atop,rtorrent,torrent} {
+ : ${(AA)muxaliases:=${(z)MUXAL}}
+ if [[ -z $TMUX || $1 == -f ]];then
+ [[ $1 == -f ]] && shift
+ if test $muxaliases[$0];then
+            =$muxaliases[$0] "$@"
+        fi
+ return
+ fi
+ app=$0
+
+ if [[ $app == vim ]];then
+        vimote tabnew $@
+ return
+ fi
+
+ : ${(AA)appMap:=${(z)WSETS}}
+
+ if [[ -z $1 ]];then
+ sess=$(tux takeme $app)
+ tmux switch-client -t $sess
+ return
+ fi
+
+ winum=$(~/bin/tux tnum $app)
+
+ if [[ -z $winum || $? != 0 ]];then
+ print "$0 = Dead | Something ==> recreated in its session"
+ return 5
+ fi
+
+ tmux join-pane -s $appMap[$app]:$winum -p 80
+    if [[ $? -ne 0 ]];then
+        return 1
+    fi
+
+# if [[ -n $1 ]];then
+#       $muxaliases[$app] $@
+# fi
+}
+
+function {reboot,sdown} {
+ typeset -A map
+ map=(sdown '/sbin/shutdown -h now' reboot 'command reboot')
+ command=$0
+ echo "Sure y/N"
+ if read -q;then
+ ~/bin/down
+ eval "$=map[$command]"
+ fi
+}
+
+viewjson(){
+ python -mjson.tool < $1
+}
+
+viewlite(){
+ sqlite3 $1 .dump
+}
+# vim: set ft=sh ts=4 sw=4 foldmethod=syntax tw=80 noet:
diff --git a/fxr b/fxr
new file mode 100755
index 0000000..021c2f8
--- a/dev/null
+++ b/fxr
@@ -0,0 +1,90 @@
+#!/usr/bin/perl
+# Testing
+#firefox-remote
+#http://trapd00r.se/all/os/2010/05/19/control-firefox-remotely/
+use strict;
+use encoding 'utf-8';
+use Data::Dumper;
+use WWW::Mechanize;
+use WWW::Mechanize::Firefox;
+
+my $w = WWW::Mechanize->new(
+ agent_alias => 'Windows Mozilla',
+);
+
+my $m = WWW::Mechanize::Firefox->new(
+ #launch => 'firefox',
+ tab => 'current',
+ agent_alias => 'Linux Mozilla',
+);
+
+
+{
+ no strict;
+ my $options = {
+ load => sub {$u = shift; $m->get($u);},
+ refresh => sub {$m->reload([BYPASS_CACHE]);},
+ back => sub {$m->back;},
+ forward => sub {$m->forward;},
+ uri => sub {print $m->uri,"\n";},
+ save => sub {$f = $_[0];$m->save_content($f);},
+ #save => sub {$f = shift // $m->uri.'.html';$m->save_content($f);},
+ info => sub {fmt($m->uri, $m->title,$m->content_type, onoff($m->is_html));},
+ links => sub {extrurl();},
+ click => sub {$m->click;},
+ pic => sub {pic()},
+ dumpimg => sub {$w->get($m->uri) && dumpimg()},
+ local => sub {$m->get_local($option2);},
+
+# showclick => sub {extrhash($m->clickables)},
+ };
+ print "OPTIONS:\n";
+ print " $_\n" for sort(keys(%$options));
+ my ($option,$option2) = @ARGV;
+ defined $options->{$option} && $options->{$option}->($option2);
+}
+
+sub dumpimg {
+ #foreach my $img($w->links) {
+ # if($img->url_abs =~ /.+\.png|jpg|jpeg|gif/) {
+ # print $img->url_abs, "\n";
+ # }
+ # else {
+ # print "foo: ", $img->url_abs, "\n";
+ # }
+ #}
+ print $_->url_abs, "\n" for ($w->find_all_images);
+}
+
+sub pic {
+ my $url = $m->uri;
+ my $img = $m->content_as_png;
+ my $fname = $m->title.'.png';
+ $fname =~ s/:/-/g;
+ $fname =~ s/\s+/_/g;
+ $fname =~ s#/|\\|\||'|"##; # no /|\'"? :
+  open(my $fh, '>', $fname) or die "Can't save to $fname: $!";
+ binmode($fh);
+ print $fh $img;
+ close $fh;
+ print "$url saved to $fname\n";
+}
+
+sub fmt {
+ my @text = @_;
+ print "$_\n" for @text;
+}
+
+sub extrurl {
+ my @links = $m->find_all_links;
+ print $_->url_abs,"\n" for @links
+}
+
+sub onoff {
+ my $i = shift;
+  # map a true/false value to a human-readable answer
+  return $i ? 'yes' : 'no';
+}
diff --git a/getimap b/getimap
new file mode 100755
index 0000000..7f2845c
--- a/dev/null
+++ b/getimap
@@ -0,0 +1,27 @@
+#!/bin/bash
+#exit
+if ! mount | grep -q wormole;then
+ echo "Booh.... "
+ exit
+fi
+
+# the [g]etmail pattern keeps the grep from matching itself
+if ps auxww | grep -q "[g]etmail";then
+ exit
+fi
+
+
+echo "#########################`date`####################" >> ~/logs/getmail.log
+
+
+left=$(/bin/df -h | grep home | head -1 | awk '{ print $5 }' | tr -d '%')
+
+if [[ $left -gt 90 ]];then
+ echo "Low on space -- quitting" >> ~/logs/getmail.log
+ exit 1
+fi
+
+notify-send "Mailman" "....Fetching mails."
+
+setlock -X -n /tmp/locks/getmail getmail -n -q --rcfile=getmailrc.gmail --rcfile=getmailrc.yahoo --rcfile=getmailrc.wnohang --rcfile=getmailrc.gmail2
+
+[[ $? == 0 ]] && notify-send "Mailman" ".. Done "
diff --git a/gitprompt b/gitprompt
new file mode 100755
index 0000000..fd484ee
--- a/dev/null
+++ b/gitprompt
@@ -0,0 +1,20 @@
+#!/bin/zsh
+# detect git first
+
+trap 'print -n;exit 1' INT
+autoload -U colors && colors
+#isgit="$(git rev-parse --is-inside-work-tree 2>/dev/null)"
+#isgit="$(git rev-parse --is-inside-work-tree 2>/dev/null)"
+if test (../)#.git(N) 2>/dev/null;then
+ local gprompt="("
+ gprompt+="$(git branch | grep '*'| cut -d ' ' -f 2 | tr -d '\n')"
+ #timeout 3 git status -s
+ if [[ -n $(timeout -s KILL 2 git status -s) ]];then
+ gprompt+="⚡";
+ else
+ gprompt+="ʃ";
+ fi
+ gprompt+=")"
+ printf " %s%s" "%{${fg[yellow]}%}" $gprompt
+fi
+
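gitprompt prints a segment such as "(master⚡)" when the work tree is dirty and "(masterʃ)" when it is clean, and nothing outside a git checkout. The sketch below expresses the same logic in Python; it is an illustration, not part of the repository, and it substitutes git symbolic-ref --short HEAD and git status --porcelain for the exact commands used in the zsh script:

    import subprocess

    def git_segment():
        """Return '(branch⚡)' for a dirty work tree, '(branchʃ)' for a clean one."""
        try:
            branch = subprocess.run(["git", "symbolic-ref", "--short", "HEAD"],
                                    capture_output=True, text=True,
                                    check=True).stdout.strip()
        except (subprocess.CalledProcessError, FileNotFoundError):
            return ""   # detached HEAD, not a repository, or git not installed
        dirty = subprocess.run(["git", "status", "--porcelain"],
                               capture_output=True, text=True).stdout.strip()
        return "(%s%s)" % (branch, "⚡" if dirty else "ʃ")

    print(git_segment())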
diff --git a/imdbpy.pyc b/imdbpy.pyc
new file mode 100644
index 0000000..83912be
--- a/dev/null
+++ b/imdbpy.pyc
Binary files differ
diff --git a/installkernel b/installkernel
new file mode 100755
index 0000000..2456c65
--- a/dev/null
+++ b/installkernel
@@ -0,0 +1,140 @@
+#!/bin/bash
+set -x
+ARCH="x86"
+echo "Enter the kernel name"
+read kernelname
+# kernver (the full kernel release string) is used throughout; derive it from the
+# build tree if the caller did not export it.
+kernver=${kernver:-$(make -s kernelrelease)}
+#Copy the bzImage etc.
+
+cp -v System.map /boot/System.map26${kernelname}
+cp -v arch/$ARCH/boot/bzImage /boot/vmlinuz26${kernelname}
+install -m644 -D vmlinux /usr/src/linux-${kernver}/vmlinux
+pkgname="kernel26-$kernelname"
+
+echo "Generating mkinitcpio stuff"
+/bin/echo "ALL_kver=$kernver" > /etc/mkinitcpio.d/kernel26${kernelname}.kver
+
+/bin/echo "
+# mkinitcpio preset file for $kernelname
+
+########################################/
+# DO NOT EDIT THIS LINE:
+source /etc/mkinitcpio.d/kernel26${kernelname}.kver
+########################################
+ALL_config=\"/etc/mkinitcpio.conf\"
+
+PRESETS=('default' 'fallback')
+
+default_image=\"/boot/kernel26-${kernelname}.img\"
+
+fallback_image=\"/boot/kernel26-fallback${kernelname}.img\"
+fallback_options=\"-S autodetect\"
+" > /etc/mkinitcpio.d/kernel26${kernelname}.preset
+
+echo "Verify mkinitcpio.conf and others"
+read
+
+
+echo "Installing modules to /lib/modules/$kernver"
+read
+sudo make modules_install
+
+echo "Time for headers..hmmm"
+read
+
+ pushd /lib/modules/${kernver}
+ ln -sf /usr/src/linux-${kernver} build
+ popd
+ install -D -m644 Makefile \
+ /usr/src/linux-${kernver}/Makefile
+ install -D -m644 kernel/Makefile \
+ /usr/src/linux-${kernver}/kernel/Makefile
+ install -D -m644 .config \
+ /usr/src/linux-${kernver}/.config
+ mkdir -p /usr/src/linux-${kernver}/include
+
+ for i in acpi asm-generic config generated linux math-emu media net pcmcia scsi sound trace video; do
+ cp -a include/$i /usr/src/linux-${kernver}/include/
+ done
+
+ # copy arch includes for external modules
+ mkdir -p /usr/src/linux-${kernver}/arch/x86
+ cp -a arch/x86/include /usr/src/linux-${kernver}/arch/x86/
+
+ # copy files necessary for later builds, like nvidia and vmware
+ cp Module.symvers /usr/src/linux-${kernver}
+ cp -a scripts /usr/src/linux-${kernver}
+ # fix permissions on scripts dir
+ chmod og-w -R /usr/src/linux-${kernver}/scripts
+ mkdir -p /usr/src/linux-${kernver}/.tmp_versions
+
+ mkdir -p /usr/src/linux-${kernver}/arch/$ARCH/kernel
+
+ cp arch/$ARCH/Makefile /usr/src/linux-${kernver}/arch/$ARCH/
+ cp arch/$ARCH/kernel/asm-offsets.s /usr/src/linux-${kernver}/arch/$ARCH/kernel/
+
+ # add headers for lirc package
+ #mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/video
+ #cp drivers/media/video/*.h ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/video/
+ #for i in bt8xx cpia2 cx25840 cx88 em28xx et61x251 pwc saa7134 sn9c102 usbvideo zc0301; do
+ # mkdir -p /usr/src/linux-${kernver}/drivers/media/video/$i
+ # cp -a drivers/media/video/$i/*.h ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/video/$i
+ #done
+ # add docbook makefile
+ install -D -m644 Documentation/DocBook/Makefile \
+ /usr/src/linux-${kernver}/Documentation/DocBook/Makefile
+ # add dm headers
+ #mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/drivers/md
+ #cp drivers/md/*.h ${pkgdir}/usr/src/linux-${_kernver}/drivers/md
+ # add inotify.h
+ mkdir -p /usr/src/linux-${kernver}/include/linux
+ cp include/linux/inotify.h /usr/src/linux-${kernver}/include/linux/
+ # add wireless headers
+ #mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/net/mac80211/
+ #cp net/mac80211/*.h ${pkgdir}/usr/src/linux-${_kernver}/net/mac80211/
+ # add dvb headers for external modules
+ # in reference to:
+ # http://bugs.archlinux.org/task/9912
+ #mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/dvb/dvb-core
+ #cp drivers/media/dvb/dvb-core/*.h ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/dvb/dvb-core/
+ # add dvb headers for external modules
+ # in reference to:
+ # http://bugs.archlinux.org/task/11194
+ #mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/include/config/dvb/
+ #cp include/config/dvb/*.h ${pkgdir}/usr/src/linux-${_kernver}/include/config/dvb/
+ # add dvb headers for http://mcentral.de/hg/~mrec/em28xx-new
+ # in reference to:
+ # http://bugs.archlinux.org/task/13146
+ #mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/dvb/frontends/
+ #cp drivers/media/dvb/frontends/lgdt330x.h ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/dvb/frontends/
+ #cp drivers/media/video/msp3400-driver.h ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/dvb/frontends/
+ # add xfs and shmem for aufs building
+ mkdir -p /usr/src/linux-${kernver}/fs/xfs
+ mkdir -p /usr/src/linux-${kernver}/mm
+ cp fs/xfs/xfs_sb.h /usr/src/linux-${kernver}/fs/xfs/xfs_sb.h
+  # add headers for virtualbox
+ # in reference to:
+ # http://bugs.archlinux.org/task/14568
+ cp -a include/drm /usr/src/linux-${kernver}/include/
+ # add headers for broadcom wl
+ # in reference to:
+ # http://bugs.archlinux.org/task/14568
+ cp -a include/trace /usr/src/linux-${kernver}/include/
+ # copy in Kconfig files
+ for i in `find . -name "Kconfig*"`; do
+ mkdir -p /usr/src/linux-${kernver}/`echo $i | sed 's|/Kconfig.*||'`
+ cp $i /usr/src/linux-${kernver}/$i
+ done
+
+ chown -R root.root /usr/src/linux-${kernver}
+ find /usr/src/linux-${kernver} -type d -exec chmod 755 {} \;
+ # remove unneeded architectures
+ #rm -rf /usr/src/linux-${kernver}/arch/{alpha,arm,arm26,avr32,blackfin,cris,frv,h8300,ia64,m32r,m68k,m68knommu,mips,microblaze,mn10300,parisc,powerpc,ppc,s390,sh,sh64,sparc,sparc64,um,v850,xtensa}
+
+echo "Installing the initramfs image"
+mkinitcpio -p kernel26${kernelname}
+
+depmod $kernver
+set +x
+
+echo "Rebuild packages here: /home/raghavendra/Arch/Build/out-of-kernel"
diff --git a/jslint b/jslint
new file mode 100755
index 0000000..dd5832c
--- a/dev/null
+++ b/jslint
@@ -0,0 +1,4271 @@
+#!/usr/bin/js
+// (C)2002 Douglas Crockford
+// www.JSLint.com
+// Spidermonkey hacks by Andy Walker <walkeraj[at]gmail[dot]com>
+"use strict";
+JSLINT = function() {
+ var adsafe_id, adsafe_may, adsafe_went, anonname, approved, atrule = {
+ 'import': true,
+ media: true,
+ 'font-face': true,
+ page: true
+ },
+ badbreak = {
+ ')': true,
+ ']': true,
+ '++': true,
+ '--': true
+ },
+ banned = {
+ apply: true,
+ 'arguments': true,
+ call: true,
+ callee: true,
+ caller: true,
+ constructor: true,
+ 'eval': true,
+ prototype: true,
+ unwatch: true,
+ valueOf: true,
+ watch: true
+ },
+ boolOptions = {
+ adsafe: true,
+ bitwise: true,
+ browser: true,
+ cap: true,
+ css: true,
+ debug: true,
+ eqeqeq: true,
+ evil: true,
+ forin: true,
+ fragment: true,
+ laxbreak: true,
+ nomen: true,
+ on: true,
+ onevar: true,
+ passfail: true,
+ plusplus: true,
+ regexp: true,
+ rhino: true,
+ undef: true,
+ safe: true,
+ sidebar: true,
+ strict: true,
+ sub: true,
+ white: true,
+ widget: true
+ },
+ browser = {
+ alert: true,
+ blur: true,
+ clearInterval: true,
+ clearTimeout: true,
+ close: true,
+ closed: true,
+ confirm: true,
+ console: true,
+ Debug: true,
+ defaultStatus: true,
+ document: true,
+ event: true,
+ focus: true,
+ frames: true,
+ getComputedStyle: true,
+ history: true,
+ Image: true,
+ length: true,
+ location: true,
+ moveBy: true,
+ moveTo: true,
+ name: true,
+ navigator: true,
+ onblur: true,
+ onerror: true,
+ onfocus: true,
+ onload: true,
+ onresize: true,
+ onunload: true,
+ open: true,
+ opener: true,
+ opera: true,
+ Option: true,
+ parent: true,
+ print: true,
+ prompt: true,
+ resizeBy: true,
+ resizeTo: true,
+ screen: true,
+ scroll: true,
+ scrollBy: true,
+ scrollTo: true,
+ self: true,
+ setInterval: true,
+ setTimeout: true,
+ status: true,
+ top: true,
+ window: true,
+ XMLHttpRequest: true
+ },
+ cssAttributeData,
+ cssAny,
+ cssColorData = {
+ "aliceblue": true,
+ "antiquewhite": true,
+ "aqua": true,
+ "aquamarine": true,
+ "azure": true,
+ "beige": true,
+ "bisque": true,
+ "black": true,
+ "blanchedalmond": true,
+ "blue": true,
+ "blueviolet": true,
+ "brown": true,
+ "burlywood": true,
+ "cadetblue": true,
+ "chartreuse": true,
+ "chocolate": true,
+ "coral": true,
+ "cornflowerblue": true,
+ "cornsilk": true,
+ "crimson": true,
+ "cyan": true,
+ "darkblue": true,
+ "darkcyan": true,
+ "darkgoldenrod": true,
+ "darkgray": true,
+ "darkgreen": true,
+ "darkkhaki": true,
+ "darkmagenta": true,
+ "darkolivegreen": true,
+ "darkorange": true,
+ "darkorchid": true,
+ "darkred": true,
+ "darksalmon": true,
+ "darkseagreen": true,
+ "darkslateblue": true,
+ "darkslategray": true,
+ "darkturquoise": true,
+ "darkviolet": true,
+ "deeppink": true,
+ "deepskyblue": true,
+ "dimgray": true,
+ "dodgerblue": true,
+ "firebrick": true,
+ "floralwhite": true,
+ "forestgreen": true,
+ "fuchsia": true,
+ "gainsboro": true,
+ "ghostwhite": true,
+ "gold": true,
+ "goldenrod": true,
+ "gray": true,
+ "green": true,
+ "greenyellow": true,
+ "honeydew": true,
+ "hotpink": true,
+ "indianred": true,
+ "indigo": true,
+ "ivory": true,
+ "khaki": true,
+ "lavender": true,
+ "lavenderblush": true,
+ "lawngreen": true,
+ "lemonchiffon": true,
+ "lightblue": true,
+ "lightcoral": true,
+ "lightcyan": true,
+ "lightgoldenrodyellow": true,
+ "lightgreen": true,
+ "lightpink": true,
+ "lightsalmon": true,
+ "lightseagreen": true,
+ "lightskyblue": true,
+ "lightslategray": true,
+ "lightsteelblue": true,
+ "lightyellow": true,
+ "lime": true,
+ "limegreen": true,
+ "linen": true,
+ "magenta": true,
+ "maroon": true,
+ "mediumaquamarine": true,
+ "mediumblue": true,
+ "mediumorchid": true,
+ "mediumpurple": true,
+ "mediumseagreen": true,
+ "mediumslateblue": true,
+ "mediumspringgreen": true,
+ "mediumturquoise": true,
+ "mediumvioletred": true,
+ "midnightblue": true,
+ "mintcream": true,
+ "mistyrose": true,
+ "moccasin": true,
+ "navajowhite": true,
+ "navy": true,
+ "oldlace": true,
+ "olive": true,
+ "olivedrab": true,
+ "orange": true,
+ "orangered": true,
+ "orchid": true,
+ "palegoldenrod": true,
+ "palegreen": true,
+ "paleturquoise": true,
+ "palevioletred": true,
+ "papayawhip": true,
+ "peachpuff": true,
+ "peru": true,
+ "pink": true,
+ "plum": true,
+ "powderblue": true,
+ "purple": true,
+ "red": true,
+ "rosybrown": true,
+ "royalblue": true,
+ "saddlebrown": true,
+ "salmon": true,
+ "sandybrown": true,
+ "seagreen": true,
+ "seashell": true,
+ "sienna": true,
+ "silver": true,
+ "skyblue": true,
+ "slateblue": true,
+ "slategray": true,
+ "snow": true,
+ "springgreen": true,
+ "steelblue": true,
+ "tan": true,
+ "teal": true,
+ "thistle": true,
+ "tomato": true,
+ "turquoise": true,
+ "violet": true,
+ "wheat": true,
+ "white": true,
+ "whitesmoke": true,
+ "yellow": true,
+ "yellowgreen": true
+ },
+ cssBorderStyle,
+ cssLengthData = {
+ '%': true,
+ 'cm': true,
+ 'em': true,
+ 'ex': true,
+ 'in': true,
+ 'mm': true,
+ 'pc': true,
+ 'pt': true,
+ 'px': true
+ },
+ escapes = {
+ '\b': '\\b',
+ '\t': '\\t',
+ '\n': '\\n',
+ '\f': '\\f',
+ '\r': '\\r',
+ '"': '\\"',
+ '/': '\\/',
+ '\\': '\\\\'
+ },
+ funct,
+ functions,
+ global,
+ htmltag = {
+ a: {},
+ abbr: {},
+ acronym: {},
+ address: {},
+ applet: {},
+ area: {
+ empty: true,
+ parent: ' map '
+ },
+ b: {},
+ base: {
+ empty: true,
+ parent: ' head '
+ },
+ bdo: {},
+ big: {},
+ blockquote: {},
+ body: {
+ parent: ' html noframes '
+ },
+ br: {
+ empty: true
+ },
+ button: {},
+ canvas: {
+ parent: ' body p div th td '
+ },
+ caption: {
+ parent: ' table '
+ },
+ center: {},
+ cite: {},
+ code: {},
+ col: {
+ empty: true,
+ parent: ' table colgroup '
+ },
+ colgroup: {
+ parent: ' table '
+ },
+ dd: {
+ parent: ' dl '
+ },
+ del: {},
+ dfn: {},
+ dir: {},
+ div: {},
+ dl: {},
+ dt: {
+ parent: ' dl '
+ },
+ em: {},
+ embed: {},
+ fieldset: {},
+ font: {},
+ form: {},
+ frame: {
+ empty: true,
+ parent: ' frameset '
+ },
+ frameset: {
+ parent: ' html frameset '
+ },
+ h1: {},
+ h2: {},
+ h3: {},
+ h4: {},
+ h5: {},
+ h6: {},
+ head: {
+ parent: ' html '
+ },
+ html: {
+ parent: '*'
+ },
+ hr: {
+ empty: true
+ },
+ i: {},
+ iframe: {},
+ img: {
+ empty: true
+ },
+ input: {
+ empty: true
+ },
+ ins: {},
+ kbd: {},
+ label: {},
+ legend: {
+ parent: ' fieldset '
+ },
+ li: {
+ parent: ' dir menu ol ul '
+ },
+ link: {
+ empty: true,
+ parent: ' head '
+ },
+ map: {},
+ menu: {},
+ meta: {
+ empty: true,
+ parent: ' head noframes noscript '
+ },
+ noframes: {
+ parent: ' html body '
+ },
+ noscript: {
+ parent: ' body head noframes '
+ },
+ object: {},
+ ol: {},
+ optgroup: {
+ parent: ' select '
+ },
+ option: {
+ parent: ' optgroup select '
+ },
+ p: {},
+ param: {
+ empty: true,
+ parent: ' applet object '
+ },
+ pre: {},
+ q: {},
+ samp: {},
+ script: {
+ empty: true,
+ parent: ' body div frame head iframe p pre span '
+ },
+ select: {},
+ small: {},
+ span: {},
+ strong: {},
+ style: {
+ parent: ' head ',
+ empty: true
+ },
+ sub: {},
+ sup: {},
+ table: {},
+ tbody: {
+ parent: ' table '
+ },
+ td: {
+ parent: ' tr '
+ },
+ textarea: {},
+ tfoot: {
+ parent: ' table '
+ },
+ th: {
+ parent: ' tr '
+ },
+ thead: {
+ parent: ' table '
+ },
+ title: {
+ parent: ' head '
+ },
+ tr: {
+ parent: ' table tbody thead tfoot '
+ },
+ tt: {},
+ u: {},
+ ul: {},
+ 'var': {}
+ },
+ ids,
+ implied,
+ inblock,
+ indent,
+ jsonmode,
+ lines,
+ lookahead,
+ member,
+ membersOnly,
+ nexttoken,
+ noreach,
+ option,
+ predefined,
+ prereg,
+ prevtoken,
+ pseudorule = {
+ 'first-child': true,
+ link: true,
+ visited: true,
+ hover: true,
+ active: true,
+ focus: true,
+ lang: true,
+ 'first-letter': true,
+ 'first-line': true,
+ before: true,
+ after: true
+ },
+ rhino = {
+ defineClass: true,
+ deserialize: true,
+ gc: true,
+ help: true,
+ load: true,
+ loadClass: true,
+ print: true,
+ quit: true,
+ readFile: true,
+ readUrl: true,
+ runCommand: true,
+ seal: true,
+ serialize: true,
+ spawn: true,
+ sync: true,
+ toint32: true,
+ version: true
+ },
+ scope,
+ sidebar = {
+ System: true
+ },
+ src,
+ stack,
+ standard = {
+ Array: true,
+ Boolean: true,
+ Date: true,
+ decodeURI: true,
+ decodeURIComponent: true,
+ encodeURI: true,
+ encodeURIComponent: true,
+ Error: true,
+ 'eval': true,
+ EvalError: true,
+ Function: true,
+ isFinite: true,
+ isNaN: true,
+ JSON: true,
+ Math: true,
+ Number: true,
+ Object: true,
+ parseInt: true,
+ parseFloat: true,
+ RangeError: true,
+ ReferenceError: true,
+ RegExp: true,
+ String: true,
+ SyntaxError: true,
+ TypeError: true,
+ URIError: true
+ },
+ standard_member = {
+ E: true,
+ LN2: true,
+ LN10: true,
+ LOG2E: true,
+ LOG10E: true,
+ PI: true,
+ SQRT1_2: true,
+ SQRT2: true,
+ MAX_VALUE: true,
+ MIN_VALUE: true,
+ NEGATIVE_INFINITY: true,
+ POSITIVE_INFINITY: true
+ },
+ syntax = {},
+ tab,
+ token,
+ urls,
+ warnings,
+ widget = {
+ alert: true,
+ appleScript: true,
+ animator: true,
+ appleScript: true,
+ beep: true,
+ bytesToUIString: true,
+ Canvas: true,
+ chooseColor: true,
+ chooseFile: true,
+ chooseFolder: true,
+ closeWidget: true,
+ COM: true,
+ convertPathToHFS: true,
+ convertPathToPlatform: true,
+ CustomAnimation: true,
+ escape: true,
+ FadeAnimation: true,
+ filesystem: true,
+ focusWidget: true,
+ form: true,
+ FormField: true,
+ Frame: true,
+ HotKey: true,
+ Image: true,
+ include: true,
+ isApplicationRunning: true,
+ iTunes: true,
+ konfabulatorVersion: true,
+ log: true,
+ MenuItem: true,
+ MoveAnimation: true,
+ openURL: true,
+ play: true,
+ Point: true,
+ popupMenu: true,
+ preferenceGroups: true,
+ preferences: true,
+ print: true,
+ prompt: true,
+ random: true,
+ reloadWidget: true,
+ ResizeAnimation: true,
+ resolvePath: true,
+ resumeUpdates: true,
+ RotateAnimation: true,
+ runCommand: true,
+ runCommandInBg: true,
+ saveAs: true,
+ savePreferences: true,
+ screen: true,
+ ScrollBar: true,
+ showWidgetPreferences: true,
+ sleep: true,
+ speak: true,
+ suppressUpdates: true,
+ system: true,
+ tellWidget: true,
+ Text: true,
+ TextArea: true,
+ Timer: true,
+ unescape: true,
+ updateNow: true,
+ URL: true,
+ widget: true,
+ Window: true,
+ XMLDOM: true,
+ XMLHttpRequest: true,
+ yahooCheckLogin: true,
+ yahooLogin: true,
+ yahooLogout: true
+ },
+ xmode,
+ xquote,
+ ax = /@cc|<\/?|script|\]*s\]|<\s*!|&lt/i,
+ cx = /[\u0000-\u001f\u007f-\u009f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/,
+ tx = /^\s*([(){}\[.,:;'"~\?\]#@]|==?=?|\/(\*(global|extern|jslint|member|members)?|=|\/)?|\*[\/=]?|\+[+=]?|-[\-=]?|%=?|&[&=]?|\|[|=]?|>>?>?=?|<([\/=]|\!(\[|--)?|<=?)?|\^=?|\!=?=?|[a-zA-Z_$][a-zA-Z0-9_$]*|[0-9]+([xX][0-9a-fA-F]+|\.[0-9]*)?([eE][+\-]?[0-9]+)?)/,
+ hx = /^\s*(['"=>\/&#]|<[\/!]?|[a-zA-Z][a-zA-Z0-9_\-]*|--)/,
+ ox = /[>&]|<[\/!]?|--/,
+ lx = /\*\/|\/\*/,
+ ix = /^([a-zA-Z_$][a-zA-Z0-9_$]*)$/,
+ jx = /^(?:javascript|jscript|ecmascript|vbscript|mocha|livescript)\s*:/i,
+ ux = /&|\+|\u00AD|\.\.|\/\*|%[^;]|base64|url|expression|data|mailto/i,
+ sx = /^\s*([{:#*%.=,>+\[\]@()"';*]|[a-zA-Z0-9_][a-zA-Z0-9_\-]*|<\/|\/\*)/,
+ ssx = /^\s*([@#!"'};:\-\/%.=,+\[\]()*_]|[a-zA-Z][a-zA-Z0-9._\-]*|\d+(?:\.\d+)?|<\/)/,
+ rx = {
+ outer: hx,
+ html: hx,
+ style: sx,
+ styleproperty: ssx
+ };
+ function F() {}
+ if (typeof Object.create !== 'function') {
+ Object.create = function(o) {
+ F.prototype = o;
+ return new F();
+ };
+ }
+ Object.prototype.union = function(o) {
+ var n;
+ for (n in o) {
+ if (o.hasOwnProperty(n)) {
+ this[n] = o[n];
+ }
+ }
+ };
+ String.prototype.entityify = function() {
+ return this.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;');
+ };
+ String.prototype.isAlpha = function() {
+ return (this >= 'a' && this <= 'z\uffff') || (this >= 'A' && this <= 'Z\uffff');
+ };
+ String.prototype.isDigit = function() {
+ return (this >= '0' && this <= '9');
+ };
+ String.prototype.supplant = function(o) {
+ return this.replace(/\{([^{}]*)\}/g,
+ function(a, b) {
+ var r = o[b];
+ return typeof r === 'string' || typeof r === 'number' ? r: a;
+ });
+ };
+ String.prototype.name = function() {
+ if (ix.test(this)) {
+ return this;
+ }
+ if (/[&<"\/\\\x00-\x1f]/.test(this)) {
+ return '"' + this.replace(/[&<"\/\\\x00-\x1f]/g,
+ function(a) {
+ var c = escapes[a];
+ if (c) {
+ return c;
+ }
+ c = a.charCodeAt();
+ return '\\u00' + Math.floor(c / 16).toString(16) + (c % 16).toString(16);
+ }) + '"';
+ }
+ return '"' + this + '"';
+ };
+ function assume() {
+ if (!option.safe) {
+ if (option.rhino) {
+ predefined.union(rhino);
+ }
+ if (option.browser || option.sidebar) {
+ predefined.union(browser);
+ }
+ if (option.sidebar) {
+ predefined.union(sidebar);
+ }
+ if (option.widget) {
+ predefined.union(widget);
+ }
+ }
+ }
+ function quit(m, l, ch) {
+ throw {
+ name: 'JSLintError',
+ line: l,
+ character: ch,
+ message: m + " (" + Math.floor((l / lines.length) * 100) + "% scanned)."
+ };
+ }
+ function warning(m, t, a, b, c, d) {
+ var ch, l, w;
+ t = t || nexttoken;
+ if (t.id === '(end)') {
+ t = token;
+ }
+ l = t.line || 0;
+ ch = t.from || 0;
+ w = {
+ id: '(error)',
+ raw: m,
+ evidence: lines[l] || '',
+ line: l,
+ character: ch,
+ a: a,
+ b: b,
+ c: c,
+ d: d
+ };
+ w.reason = m.supplant(w);
+ JSLINT.errors.push(w);
+ if (option.passfail) {
+ quit('Stopping. ', l, ch);
+ }
+ warnings += 1;
+ if (warnings === 50) {
+ quit("Too many errors.", l, ch);
+ }
+ return w;
+ }
+ function warningAt(m, l, ch, a, b, c, d) {
+ return warning(m, {
+ line: l,
+ from: ch
+ },
+ a, b, c, d);
+ }
+ function error(m, t, a, b, c, d) {
+ var w = warning(m, t, a, b, c, d);
+ quit("Stopping, unable to continue.", w.line, w.character);
+ }
+ function errorAt(m, l, ch, a, b, c, d) {
+ return error(m, {
+ line: l,
+ from: ch
+ },
+ a, b, c, d);
+ }
+ var lex = function lex() {
+ var character, from, line, s;
+ function nextLine() {
+ var at;
+ line += 1;
+ if (line >= lines.length) {
+ return false;
+ }
+ character = 0;
+ s = lines[line].replace(/\t/g, tab);
+ at = s.search(cx);
+ if (at >= 0) {
+ warningAt("Unsafe character.", line, at);
+ }
+ return true;
+ }
+ function it(type, value) {
+ var i, t;
+ if (type === '(color)') {
+ t = {
+ type: type
+ };
+ } else if (type === '(punctuator)' || (type === '(identifier)' && syntax.hasOwnProperty(value))) {
+ t = syntax[value];
+ if (!t.id) {
+ t = syntax[type];
+ }
+ } else {
+ t = syntax[type];
+ }
+ t = Object.create(t);
+ if (type === '(string)' || type === '(range)') {
+ if (jx.test(value)) {
+ warningAt("Script URL.", line, from);
+ }
+ }
+ if (type === '(identifier)') {
+ t.identifier = true;
+ if (option.nomen && value.charAt(0) === '_') {
+ warningAt("Unexpected '_' in '{a}'.", line, from, value);
+ }
+ }
+ t.value = value;
+ t.line = line;
+ t.character = character;
+ t.from = from;
+ i = t.id;
+ if (i !== '(endline)') {
+ prereg = i && (('(,=:[!&|?{};'.indexOf(i.charAt(i.length - 1)) >= 0) || i === 'return');
+ }
+ return t;
+ }
+ return {
+ init: function(source) {
+ if (typeof source === 'string') {
+ lines = source.replace(/\r\n/g, '\n').replace(/\r/g, '\n').split('\n');
+ } else {
+ lines = source;
+ }
+ line = -1;
+ nextLine();
+ from = 0;
+ },
+ range: function(begin, end) {
+ var c, value = '';
+ from = character;
+ if (s.charAt(0) !== begin) {
+ errorAt("Expected '{a}' and instead saw '{b}'.", line, character, begin, s.charAt(0));
+ }
+ for (;;) {
+ s = s.slice(1);
+ character += 1;
+ c = s.charAt(0);
+ switch (c) {
+ case '':
+ errorAt("Missing '{a}'.", line, character, c);
+ break;
+ case end:
+ s = s.slice(1);
+ character += 1;
+ return it('(range)', value);
+ case xquote:
+ case '\\':
+ case '\'':
+ case '"':
+ warningAt("Unexpected '{a}'.", line, character, c);
+ }
+ value += c;
+ }
+ },
+ token: function() {
+ var b, c, captures, d, depth, high, i, l, low, q, t;
+ function match(x) {
+ var r = x.exec(s),
+ r1;
+ if (r) {
+ l = r[0].length;
+ r1 = r[1];
+ c = r1.charAt(0);
+ s = s.substr(l);
+ character += l;
+ from = character - r1.length;
+ return r1;
+ }
+ }
+ function string(x) {
+ var c, j, r = '';
+ if (jsonmode && x !== '"') {
+ warningAt("Strings must use doublequote.", line, character);
+ }
+ if (xquote === x || (xmode === 'scriptstring' && !xquote)) {
+ return it('(punctuator)', x);
+ }
+ function esc(n) {
+ var i = parseInt(s.substr(j + 1, n), 16);
+ j += n;
+ if (i >= 32 && i <= 127 && i !== 34 && i !== 92 && i !== 39) {
+ warningAt("Unnecessary escapement.", line, character);
+ }
+ character += n;
+ c = String.fromCharCode(i);
+ }
+ j = 0;
+ for (;;) {
+ while (j >= s.length) {
+ j = 0;
+ if (xmode !== 'html' || !nextLine()) {
+ errorAt("Unclosed string.", line, from);
+ }
+ }
+ c = s.charAt(j);
+ if (c === x) {
+ character += 1;
+ s = s.substr(j + 1);
+ return it('(string)', r, x);
+ }
+ if (c < ' ') {
+ if (c === '\n' || c === '\r') {
+ break;
+ }
+ warningAt("Control character in string: {a}.", line, character + j, s.slice(0, j));
+ } else if (c === xquote) {
+ warningAt("Bad HTML string", line, character + j);
+ } else if (c === '<') {
+ if (option.safe && xmode === 'html') {
+ warningAt("ADsafe string violation.", line, character + j);
+ } else if (s.charAt(j + 1) === '/' && (xmode || option.safe)) {
+ warningAt("Expected '<\\/' and instead saw '</'.", line, character);
+ } else if (s.charAt(j + 1) === '!' && (xmode || option.safe)) {
+ warningAt("Unexpected '<!' in a string.", line, character);
+ }
+ } else if (c === '\\') {
+ if (xmode === 'html') {
+ if (option.safe) {
+ warningAt("ADsafe string violation.", line, character + j);
+ }
+ } else if (xmode === 'styleproperty') {
+ j += 1;
+ character += 1;
+ c = s.charAt(j);
+ if (c !== x) {
+ warningAt("Escapement in style string.", line, character + j);
+ }
+ } else {
+ j += 1;
+ character += 1;
+ c = s.charAt(j);
+ switch (c) {
+ case xquote:
+ warningAt("Bad HTML string", line, character + j);
+ break;
+ case '\\':
+ case '\'':
+ case '"':
+ case '/':
+ break;
+ case 'b':
+ c = '\b';
+ break;
+ case 'f':
+ c = '\f';
+ break;
+ case 'n':
+ c = '\n';
+ break;
+ case 'r':
+ c = '\r';
+ break;
+ case 't':
+ c = '\t';
+ break;
+ case 'u':
+ esc(4);
+ break;
+ case 'v':
+ c = '\v';
+ break;
+ case 'x':
+ if (jsonmode) {
+ warningAt("Avoid \\x-.", line, character);
+ }
+ esc(2);
+ break;
+ default:
+ warningAt("Bad escapement.", line, character);
+ }
+ }
+ }
+ r += c;
+ character += 1;
+ j += 1;
+ }
+ }
+ for (;;) {
+ if (!s) {
+ return it(nextLine() ? '(endline)': '(end)', '');
+ }
+ while (xmode === 'outer') {
+ i = s.search(ox);
+ if (i === 0) {
+ break;
+ } else if (i > 0) {
+ character += 1;
+ s = s.slice(i);
+ break;
+ } else {
+ if (!nextLine()) {
+ return it('(end)', '');
+ }
+ }
+ }
+ t = match(rx[xmode] || tx);
+ if (!t) {
+ if (xmode === 'html') {
+ return it('(error)', s.charAt(0));
+ } else {
+ t = '';
+ c = '';
+ while (s && s < '!') {
+ s = s.substr(1);
+ }
+ if (s) {
+ errorAt("Unexpected '{a}'.", line, character, s.substr(0, 1));
+ }
+ }
+ } else {
+ if (c.isAlpha() || c === '_' || c === '$') {
+ return it('(identifier)', t);
+ }
+ if (c.isDigit()) {
+ if (xmode !== 'style' && !isFinite(Number(t))) {
+ warningAt("Bad number '{a}'.", line, character, t);
+ }
+ if (xmode !== 'styleproperty' && s.substr(0, 1).isAlpha()) {
+ warningAt("Missing space after '{a}'.", line, character, t);
+ }
+ if (c === '0') {
+ d = t.substr(1, 1);
+ if (d.isDigit()) {
+ if (token.id !== '.' && xmode !== 'styleproperty') {
+ warningAt("Don't use extra leading zeros '{a}'.", line, character, t);
+ }
+ } else if (jsonmode && (d === 'x' || d === 'X')) {
+ warningAt("Avoid 0x-. '{a}'.", line, character, t);
+ }
+ }
+ if (t.substr(t.length - 1) === '.') {
+ warningAt("A trailing decimal point can be confused with a dot '{a}'.", line, character, t);
+ }
+ return it('(number)', t);
+ }
+ switch (t) {
+ case '"':
+ case "'":
+ return string(t);
+ case '//':
+ if (src || (xmode && xmode !== 'script')) {
+ warningAt("Unexpected comment.", line, character);
+ } else if (xmode === 'script' && /<\s*\//i.test(s)) {
+ warningAt("Unexpected <\/ in comment.", line, character);
+ } else if ((option.safe || xmode === 'script') && ax.test(s)) {
+ warningAt("Dangerous comment.", line, character);
+ }
+ s = '';
+ token.comment = true;
+ break;
+ case '/*':
+ if (src || (xmode && xmode !== 'script' && xmode !== 'style' && xmode !== 'styleproperty')) {
+ warningAt("Unexpected comment.", line, character);
+ }
+ if (option.safe && ax.test(s)) {
+ warningAt("ADsafe comment violation.", line, character);
+ }
+ for (;;) {
+ i = s.search(lx);
+ if (i >= 0) {
+ break;
+ }
+ if (!nextLine()) {
+ errorAt("Unclosed comment.", line, character);
+ } else {
+ if (option.safe && ax.test(s)) {
+ warningAt("ADsafe comment violation.", line, character);
+ }
+ }
+ }
+ character += i + 2;
+ if (s.substr(i, 1) === '/') {
+ errorAt("Nested comment.", line, character);
+ }
+ s = s.substr(i + 2);
+ token.comment = true;
+ break;
+ case '/*global':
+ case '/*extern':
+ case '/*members':
+ case '/*member':
+ case '/*jslint':
+ case '*/':
+ return {
+ value: t,
+ type: 'special',
+ line: line,
+ character: character,
+ from: from
+ };
+ case '':
+ break;
+ case '/':
+ if (prereg) {
+ depth = 0;
+ captures = 0;
+ l = 0;
+ for (;;) {
+ b = true;
+ c = s.charAt(l);
+ l += 1;
+ switch (c) {
+ case '':
+ errorAt("Unclosed regular expression.", line, from);
+ return;
+ case '/':
+ if (depth > 0) {
+ warningAt("Unescaped '{a}'.", line, from + l, '/');
+ }
+ c = s.substr(0, l - 1);
+ q = {
+ g: true,
+ i: true,
+ m: true
+ };
+ while (q[s.charAt(l)] === true) {
+ q[s.charAt(l)] = false;
+ l += 1;
+ }
+ character += l;
+ s = s.substr(l);
+ return it('(regexp)', c);
+ case '\\':
+ c = s.charAt(l);
+ if (c < ' ') {
+ warningAt("Unexpected control character in regular expression.", line, from + l);
+ } else if (c === '<') {
+ warningAt("Unexpected escaped character '{a}' in regular expression.", line, from + l, c);
+ }
+ l += 1;
+ break;
+ case '(':
+ depth += 1;
+ b = false;
+ if (s.charAt(l) === '?') {
+ l += 1;
+ switch (s.charAt(l)) {
+ case ':':
+ case '=':
+ case '!':
+ l += 1;
+ break;
+ default:
+ warningAt("Expected '{a}' and instead saw '{b}'.", line, from + l, ':', s.charAt(l));
+ }
+ } else {
+ captures += 1;
+ }
+ break;
+ case ')':
+ if (depth === 0) {
+ warningAt("Unescaped '{a}'.", line, from + l, ')');
+ } else {
+ depth -= 1;
+ }
+ break;
+ case ' ':
+ q = 1;
+ while (s.charAt(l) === ' ') {
+ l += 1;
+ q += 1;
+ }
+ if (q > 1) {
+ warningAt("Spaces are hard to count. Use {{a}}.", line, from + l, q);
+ }
+ break;
+ case '[':
+ if (s.charAt(l) === '^') {
+ l += 1;
+ }
+ q = false;
+ klass: do {
+ c = s.charAt(l);
+ l += 1;
+ switch (c) {
+ case '[':
+ case '^':
+ warningAt("Unescaped '{a}'.", line, from + l, c);
+ q = true;
+ break;
+ case '-':
+ if (q) {
+ q = false;
+ } else {
+ warningAt("Unescaped '{a}'.", line, from + l, '-');
+ q = true;
+ }
+ break;
+ case ']':
+ if (!q) {
+ warningAt("Unescaped '{a}'.", line, from + l - 1, '-');
+ }
+ break klass;
+ case '\\':
+ c = s.charAt(l);
+ if (c < ' ') {
+ warningAt("Unexpected control character in regular expression.", line, from + l);
+ } else if (c === '<') {
+ warningAt("Unexpected escaped character '{a}' in regular expression.", line, from + l, c);
+ }
+ l += 1;
+ q = true;
+ break;
+ case '/':
+ warningAt("Unescaped '{a}'.", line, from + l - 1, '/');
+ q = true;
+ break;
+ case '<':
+ if (xmode === 'script') {
+ c = s.charAt(l);
+ if (c === '!' || c === '/') {
+ warningAt("HTML confusion in regular expression '<{a}'.", line, from + l, c);
+ }
+ }
+ q = true;
+ break;
+ default:
+ q = true;
+ }
+ } while (c);
+ break;
+ case '.':
+ if (option.regexp) {
+ warningAt("Unexpected '{a}'.", line, from + l, c);
+ }
+ break;
+ case ']':
+ case '?':
+ case '{':
+ case '}':
+ case '+':
+ case '*':
+ warningAt("Unescaped '{a}'.", line, from + l, c);
+ break;
+ case '<':
+ if (xmode === 'script') {
+ c = s.charAt(l);
+ if (c === '!' || c === '/') {
+ warningAt("HTML confusion in regular expression '<{a}'.", line, from + l, c);
+ }
+ }
+ }
+ if (b) {
+ switch (s.charAt(l)) {
+ case '?':
+ case '+':
+ case '*':
+ l += 1;
+ if (s.charAt(l) === '?') {
+ l += 1;
+ }
+ break;
+ case '{':
+ l += 1;
+ c = s.charAt(l);
+ if (c < '0' || c > '9') {
+ warningAt("Expected a number and instead saw '{a}'.", line, from + l, c);
+ }
+ l += 1;
+ low = +c;
+ for (;;) {
+ c = s.charAt(l);
+ if (c < '0' || c > '9') {
+ break;
+ }
+ l += 1;
+ low = +c + (low * 10);
+ }
+ high = low;
+ if (c === ',') {
+ l += 1;
+ high = Infinity;
+ c = s.charAt(l);
+ if (c >= '0' && c <= '9') {
+ l += 1;
+ high = +c;
+ for (;;) {
+ c = s.charAt(l);
+ if (c < '0' || c > '9') {
+ break;
+ }
+ l += 1;
+ high = +c + (high * 10);
+ }
+ }
+ }
+ if (s.charAt(l) !== '}') {
+ warningAt("Expected '{a}' and instead saw '{b}'.", line, from + l, '}', c);
+ } else {
+ l += 1;
+ }
+ if (s.charAt(l) === '?') {
+ l += 1;
+ }
+ if (low > high) {
+ warningAt("'{a}' should not be greater than '{b}'.", line, from + l, low, high);
+ }
+ }
+ }
+ }
+ c = s.substr(0, l - 1);
+ character += l;
+ s = s.substr(l);
+ return it('(regexp)', c);
+ }
+ return it('(punctuator)', t);
+ case '#':
+ if (xmode === 'html' || xmode === 'styleproperty') {
+ for (;;) {
+ c = s.charAt(0);
+ if ((c < '0' || c > '9') && (c < 'a' || c > 'f') && (c < 'A' || c > 'F')) {
+ break;
+ }
+ character += 1;
+ s = s.substr(1);
+ t += c;
+ }
+ if (t.length !== 4 && t.length !== 7) {
+ warningAt("Bad hex color '{a}'.", line, from + l, t);
+ }
+ return it('(color)', t);
+ }
+ return it('(punctuator)', t);
+ default:
+ if (xmode === 'outer' && c === '&') {
+ character += 1;
+ s = s.substr(1);
+ for (;;) {
+ c = s.charAt(0);
+ character += 1;
+ s = s.substr(1);
+ if (c === ';') {
+ break;
+ }
+ if (! ((c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || c === '#')) {
+ errorAt("Bad entity", line, from + l, character);
+ }
+ }
+ break;
+ }
+ return it('(punctuator)', t);
+ }
+ }
+ }
+ }
+ };
+ } ();
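+ // addlabel: declare a name in the current function's scope (or the global scope), warning when it is already defined or was used before being defined.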
+ function addlabel(t, type) {
+ if (t === 'hasOwnProperty') {
+ error("'hasOwnProperty' is a really bad name.");
+ }
+ if (option.safe && funct['(global)']) {
+ warning('ADsafe global: ' + t + '.', token);
+ }
+ if (funct.hasOwnProperty(t)) {
+ warning(funct[t] === true ? "'{a}' was used before it was defined.": "'{a}' is already defined.", nexttoken, t);
+ }
+ funct[t] = type;
+ if (type === 'label') {
+ scope[t] = funct;
+ } else if (funct['(global)']) {
+ global[t] = funct;
+ if (implied.hasOwnProperty(t)) {
+ warning("'{a}' was used before it was defined.", nexttoken, t);
+ delete implied[t];
+ }
+ } else {
+ funct['(scope)'][t] = funct;
+ }
+ }
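+ // doOption: process the special comments /*global, /*members and /*jslint (and their older aliases), copying their contents into predefined, membersOnly or option respectively.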
+ function doOption() {
+ var b, obj, filter, o = nexttoken.value,
+ t, v;
+ switch (o) {
+ case '*/':
+ error("Unbegun comment.");
+ break;
+ case '/*global':
+ case '/*extern':
+ if (option.safe) {
+ warning("ADsafe restriction.");
+ }
+ obj = predefined;
+ break;
+ case '/*members':
+ case '/*member':
+ o = '/*members';
+ if (!membersOnly) {
+ membersOnly = {};
+ }
+ obj = membersOnly;
+ break;
+ case '/*jslint':
+ if (option.safe) {
+ warning("ADsafe restriction.");
+ }
+ obj = option;
+ filter = boolOptions;
+ }
+ for (;;) {
+ t = lex.token();
+ if (t.id === ',') {
+ t = lex.token();
+ }
+ while (t.id === '(endline)') {
+ t = lex.token();
+ }
+ if (t.type === 'special' && t.value === '*/') {
+ break;
+ }
+ if (t.type !== '(string)' && t.type !== '(identifier)' && o !== '/*members') {
+ error("Bad option.", t);
+ }
+ if (filter) {
+ if (filter[t.value] !== true) {
+ error("Bad option.", t);
+ }
+ v = lex.token();
+ if (v.id !== ':') {
+ error("Expected '{a}' and instead saw '{b}'.", t, ':', t.value);
+ }
+ v = lex.token();
+ if (v.value === 'true') {
+ b = true;
+ } else if (v.value === 'false') {
+ b = false;
+ } else {
+ error("Expected '{a}' and instead saw '{b}'.", t, 'true', t.value);
+ }
+ } else {
+ b = true;
+ }
+ obj[t.value] = b;
+ }
+ if (filter) {
+ assume();
+ }
+ }
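+ // peek: look ahead p tokens beyond nexttoken without consuming them.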
+ function peek(p) {
+ var i = p || 0,
+ j = 0,
+ t;
+ while (j <= i) {
+ t = lookahead[j];
+ if (!t) {
+ t = lookahead[j] = lex.token();
+ }
+ j += 1;
+ }
+ return t;
+ }
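+ // advance: move to the next token, optionally verifying that it matches an expected id, handling special comments and checking line-break style.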
+ function advance(id, t) {
+ var l;
+ switch (token.id) {
+ case '(number)':
+ if (nexttoken.id === '.') {
+ warning("A dot following a number can be confused with a decimal point.", token);
+ }
+ break;
+ case '-':
+ if (nexttoken.id === '-' || nexttoken.id === '--') {
+ warning("Confusing minusses.");
+ }
+ break;
+ case '+':
+ if (nexttoken.id === '+' || nexttoken.id === '++') {
+ warning("Confusing plusses.");
+ }
+ break;
+ }
+ if (token.type === '(string)' || token.identifier) {
+ anonname = token.value;
+ }
+ if (id && nexttoken.id !== id) {
+ if (t) {
+ if (nexttoken.id === '(end)') {
+ warning("Unmatched '{a}'.", t, t.id);
+ } else {
+ warning("Expected '{a}' to match '{b}' from line {c} and instead saw '{d}'.", nexttoken, id, t.id, t.line + 1, nexttoken.value);
+ }
+ } else if (nexttoken.type !== '(identifier)' || nexttoken.value !== id) {
+ warning("Expected '{a}' and instead saw '{b}'.", nexttoken, id, nexttoken.value);
+ }
+ }
+ prevtoken = token;
+ token = nexttoken;
+ for (;;) {
+ nexttoken = lookahead.shift() || lex.token();
+ if (nexttoken.id === '(end)' || nexttoken.id === '(error)') {
+ return;
+ }
+ if (nexttoken.type === 'special') {
+ doOption();
+ } else {
+ if (nexttoken.id !== '(endline)') {
+ break;
+ }
+ l = !xmode && !option.laxbreak && (token.type === '(string)' || token.type === '(number)' || token.type === '(identifier)' || badbreak[token.id]);
+ }
+ }
+ if (l) {
+ switch (nexttoken.id) {
+ case '{':
+ case '}':
+ case ']':
+ case '.':
+ break;
+ case ')':
+ switch (token.id) {
+ case ')':
+ case '}':
+ case ']':
+ break;
+ default:
+ warning("Line breaking error '{a}'.", token, ')');
+ }
+ break;
+ default:
+ warning("Line breaking error '{a}'.", token, token.value);
+ }
+ }
+ }
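+ // parse: the core of the top-down operator precedence (Pratt) parser; rbp is the right binding power and initial marks statement position.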
+ function parse(rbp, initial) {
+ var left, o;
+ if (nexttoken.id === '(end)') {
+ error("Unexpected early end of program.", token);
+ }
+ advance();
+ if (option.safe && predefined[token.value] === true && (nexttoken.id !== '(' && nexttoken.id !== '.')) {
+ warning('ADsafe violation.', token);
+ }
+ if (initial) {
+ anonname = 'anonymous';
+ funct['(verb)'] = token.value;
+ }
+ if (initial === true && token.fud) {
+ left = token.fud();
+ } else {
+ if (token.nud) {
+ o = token.exps;
+ left = token.nud();
+ } else {
+ if (nexttoken.type === '(number)' && token.id === '.') {
+ warning("A leading decimal point can be confused with a dot: '.{a}'.", token, nexttoken.value);
+ advance();
+ return token;
+ } else {
+ error("Expected an identifier and instead saw '{a}'.", token, token.id);
+ }
+ }
+ while (rbp < nexttoken.lbp) {
+ o = nexttoken.exps;
+ advance();
+ if (token.led) {
+ left = token.led(left);
+ } else {
+ error("Expected an operator and instead saw '{a}'.", token, token.id);
+ }
+ }
+ if (initial && !o) {
+ warning("Expected an assignment or function call and instead saw an expression.", token);
+ }
+ }
+ if (!option.evil && left && left.value === 'eval') {
+ warning("eval is evil.", left);
+ }
+ return left;
+ }
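+ // Whitespace checks used when the white option is on: adjacent forbids a space between two tokens, nonadjacent requires one, nospace forbids one on the same line, and indentation/nolinebreak check layout.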
+ function adjacent(left, right) {
+ left = left || token;
+ right = right || nexttoken;
+ if (option.white || xmode === 'styleproperty' || xmode === 'style') {
+ if (left.character !== right.from && left.line === right.line) {
+ warning("Unexpected space after '{a}'.", nexttoken, left.value);
+ }
+ }
+ }
+ function nospace(left, right) {
+ left = left || token;
+ right = right || nexttoken;
+ if (option.white && !left.comment) {
+ if (left.line === right.line) {
+ adjacent(left, right);
+ }
+ }
+ }
+ function nonadjacent(left, right) {
+ left = left || token;
+ right = right || nexttoken;
+ if (option.white) {
+ if (left.character === right.from) {
+ warning("Missing space after '{a}'.", nexttoken, left.value);
+ }
+ }
+ }
+ function indentation(bias) {
+ var i;
+ if (option.white && nexttoken.id !== '(end)') {
+ i = indent + (bias || 0);
+ if (nexttoken.from !== i) {
+ warning("Expected '{a}' to have an indentation of {b} instead of {c}.", nexttoken, nexttoken.value, i, nexttoken.from);
+ }
+ }
+ }
+ function nolinebreak(t) {
+ if (t.line !== nexttoken.line) {
+ warning("Line breaking error '{a}'.", t, t.id);
+ }
+ }
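+ // Symbol-table constructors for the Pratt parser: symbol/delim/stmt/blockstmt/type/reserve create entries in the syntax table, while prefix/infix/relation/assignop/bitwise/suffix attach nud/led handlers and binding powers.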
+ function symbol(s, p) {
+ var x = syntax[s];
+ if (!x || typeof x !== 'object') {
+ syntax[s] = x = {
+ id: s,
+ lbp: p,
+ value: s
+ };
+ }
+ return x;
+ }
+ function delim(s) {
+ return symbol(s, 0);
+ }
+ function stmt(s, f) {
+ var x = delim(s);
+ x.identifier = x.reserved = true;
+ x.fud = f;
+ return x;
+ }
+ function blockstmt(s, f) {
+ var x = stmt(s, f);
+ x.block = true;
+ return x;
+ }
+ function reserveName(x) {
+ var c = x.id.charAt(0);
+ if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')) {
+ x.identifier = x.reserved = true;
+ }
+ return x;
+ }
+ function prefix(s, f) {
+ var x = symbol(s, 150);
+ reserveName(x);
+ x.nud = (typeof f === 'function') ? f: function() {
+ if (option.plusplus && (this.id === '++' || this.id === '--')) {
+ warning("Unexpected use of '{a}'.", this, this.id);
+ }
+ this.right = parse(150);
+ this.arity = 'unary';
+ return this;
+ };
+ return x;
+ }
+ function type(s, f) {
+ var x = delim(s);
+ x.type = s;
+ x.nud = f;
+ return x;
+ }
+ function reserve(s, f) {
+ var x = type(s, f);
+ x.identifier = x.reserved = true;
+ return x;
+ }
+ function reservevar(s) {
+ return reserve(s,
+ function() {
+ if (this.id === 'this') {
+ if (option.safe) {
+ warning("ADsafe violation.", this);
+ }
+ }
+ return this;
+ });
+ }
+ function infix(s, f, p) {
+ var x = symbol(s, p);
+ reserveName(x);
+ x.led = (typeof f === 'function') ? f: function(left) {
+ nonadjacent(prevtoken, token);
+ nonadjacent(token, nexttoken);
+ this.left = left;
+ this.right = parse(p);
+ return this;
+ };
+ return x;
+ }
+ function relation(s, f) {
+ var x = symbol(s, 100);
+ x.led = function(left) {
+ nonadjacent(prevtoken, token);
+ nonadjacent(token, nexttoken);
+ var right = parse(100);
+ if ((left && left.id === 'NaN') || (right && right.id === 'NaN')) {
+ warning("Use the isNaN function to compare with NaN.", this);
+ } else if (f) {
+ f.apply(this, [left, right]);
+ }
+ this.left = left;
+ this.right = right;
+ return this;
+ };
+ return x;
+ }
+ function isPoorRelation(node) {
+ var n = +node.value;
+ return (node.type === '(number)' && !n) || (node.type === '(string)' && !node.value) || node.type === 'true' || node.type === 'false' || node.type === 'undefined' || node.type === 'null';
+ }
+ function assignop(s, f) {
+ symbol(s, 20).exps = true;
+ return infix(s,
+ function(left) {
+ var l;
+ this.left = left;
+ nonadjacent(prevtoken, token);
+ nonadjacent(token, nexttoken);
+ if (option.safe) {
+ l = left;
+ do {
+ if (predefined[l.value] === true) {
+ warning('ADsafe violation.', l);
+ }
+ l = l.left;
+ } while (l);
+ }
+ if (left) {
+ if (left.id === '.' || left.id === '[') {
+ if (left.left.value === 'arguments') {
+ warning('Bad assignment.', this);
+ }
+ this.right = parse(19);
+ return this;
+ } else if (left.identifier && !left.reserved) {
+ this.right = parse(19);
+ return this;
+ }
+ if (left === syntax['function']) {
+ warning("Expected an identifier in an assignment and instead saw a function invocation.", token);
+ }
+ }
+ error("Bad assignment.", this);
+ },
+ 20);
+ }
+ function bitwise(s, f, p) {
+ var x = symbol(s, p);
+ reserveName(x);
+ x.led = (typeof f === 'function') ? f: function(left) {
+ if (option.bitwise) {
+ warning("Unexpected use of '{a}'.", this, this.id);
+ }
+ nonadjacent(prevtoken, token);
+ nonadjacent(token, nexttoken);
+ this.left = left;
+ this.right = parse(p);
+ return this;
+ };
+ return x;
+ }
+ function bitwiseassignop(s) {
+ symbol(s, 20).exps = true;
+ return infix(s,
+ function(left) {
+ if (option.bitwise) {
+ warning("Unexpected use of '{a}'.", this, this.id);
+ }
+ nonadjacent(prevtoken, token);
+ nonadjacent(token, nexttoken);
+ if (left) {
+ if (left.id === '.' || left.id === '[' || (left.identifier && !left.reserved)) {
+ parse(19);
+ return left;
+ }
+ if (left === syntax['function']) {
+ warning("Expected an identifier in an assignment, and instead saw a function invocation.", token);
+ }
+ }
+ error("Bad assignment.", this);
+ },
+ 20);
+ }
+ function suffix(s, f) {
+ var x = symbol(s, 150);
+ x.led = function(left) {
+ if (option.plusplus) {
+ warning("Unexpected use of '{a}'.", this, this.id);
+ }
+ this.left = left;
+ return this;
+ };
+ return x;
+ }
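+ // optionalidentifier/identifier: consume an identifier, warning when a reserved word or something else appears instead.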
+ function optionalidentifier() {
+ if (nexttoken.reserved) {
+ warning("Expected an identifier and instead saw '{a}' (a reserved word).", nexttoken, nexttoken.id);
+ }
+ if (nexttoken.identifier) {
+ advance();
+ return token.value;
+ }
+ }
+ function identifier() {
+ var i = optionalidentifier();
+ if (i) {
+ return i;
+ }
+ if (token.id === 'function' && nexttoken.id === '(') {
+ warning("Missing name in function statement.");
+ } else {
+ error("Expected an identifier and instead saw '{a}'.", nexttoken, nexttoken.value);
+ }
+ }
+ function reachable(s) {
+ var i = 0,
+ t;
+ if (nexttoken.id !== ';' || noreach) {
+ return;
+ }
+ for (;;) {
+ t = peek(i);
+ if (t.reach) {
+ return;
+ }
+ if (t.id !== '(endline)') {
+ if (t.id === 'function') {
+ warning("Inner functions should be listed at the top of the outer function.", t);
+ break;
+ }
+ warning("Unreachable '{a}' after '{b}'.", t, t.value, s);
+ break;
+ }
+ i += 1;
+ }
+ }
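+ // statement, statements and block parse a single statement, a statement list and a braced block, enforcing indentation, label rules and required semicolons.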
+ function statement(noindent) {
+ var i = indent,
+ r, s = scope,
+ t = nexttoken;
+ if (t.id === ';') {
+ warning("Unnecessary semicolon.", t);
+ advance(';');
+ return;
+ }
+ if (t.identifier && !t.reserved && peek().id === ':') {
+ advance();
+ advance(':');
+ scope = Object.create(s);
+ addlabel(t.value, 'label');
+ if (!nexttoken.labelled) {
+ warning("Label '{a}' on {b} statement.", nexttoken, t.value, nexttoken.value);
+ }
+ if (jx.test(t.value + ':')) {
+ warning("Label '{a}' looks like a javascript url.", t, t.value);
+ }
+ nexttoken.label = t.value;
+ t = nexttoken;
+ }
+ if (!noindent) {
+ indentation();
+ }
+ r = parse(0, true);
+ if (!t.block) {
+ if (nexttoken.id !== ';') {
+ warningAt("Missing semicolon.", token.line, token.from + token.value.length);
+ } else {
+ adjacent(token, nexttoken);
+ advance(';');
+ nonadjacent(token, nexttoken);
+ }
+ }
+ indent = i;
+ scope = s;
+ return r;
+ }
+ function statements(begin) {
+ var a = [];
+ if (begin) {
+ if (option.strict && nexttoken.type !== '(string)') {
+ warning('Missing "use strict" statement.', nexttoken);
+ }
+ if (nexttoken.type === '(string)' && nexttoken.value === 'use strict') {
+ advance();
+ advance(';');
+ }
+ }
+ if (option.adsafe) {
+ switch (begin) {
+ case 'script':
+ if (!adsafe_may) {
+ if (nexttoken.value !== 'ADSAFE' || peek(0).id !== '.' || (peek(1).value !== 'id' && peek(1).value !== 'go')) {
+ error('ADsafe violation: Missing ADSAFE.id or ADSAFE.go.', nexttoken);
+ }
+ }
+ if (nexttoken.value === 'ADSAFE' && peek(0).id === '.' && peek(1).value === 'id') {
+ if (adsafe_may) {
+ error('ADsafe violation.', nexttoken);
+ }
+ advance('ADSAFE');
+ advance('.');
+ advance('id');
+ advance('(');
+ if (nexttoken.value !== adsafe_id) {
+ error('ADsafe violation: id does not match.', nexttoken);
+ }
+ advance('(string)');
+ advance(')');
+ advance(';');
+ adsafe_may = true;
+ }
+ break;
+ case 'lib':
+ if (nexttoken.value === 'ADSAFE') {
+ advance('ADSAFE');
+ advance('.');
+ advance('lib');
+ advance('(');
+ advance('(string)');
+ advance(',');
+ parse(0);
+ advance(')');
+ advance(';');
+ return a;
+ } else {
+ error("ADsafe lib violation.");
+ }
+ }
+ }
+ while (!nexttoken.reach && nexttoken.id !== '(end)') {
+ if (nexttoken.id === ';') {
+ warning("Unnecessary semicolon.");
+ advance(';');
+ } else {
+ a.push(statement());
+ }
+ }
+ return a;
+ }
+ function block(f) {
+ var a, b = inblock,
+ s = scope,
+ t;
+ inblock = f;
+ if (f) {
+ scope = Object.create(scope);
+ }
+ nonadjacent(token, nexttoken);
+ t = nexttoken;
+ if (nexttoken.id === '{') {
+ advance('{');
+ if (nexttoken.id !== '}' || token.line !== nexttoken.line) {
+ indent += option.indent;
+ if (!f && nexttoken.from === indent + option.indent) {
+ indent += option.indent;
+ }
+ a = statements();
+ indent -= option.indent;
+ indentation();
+ }
+ advance('}', t);
+ } else {
+ warning("Expected '{a}' and instead saw '{b}'.", nexttoken, '{', nexttoken.value);
+ noreach = true;
+ a = [statement()];
+ noreach = false;
+ }
+ funct['(verb)'] = null;
+ scope = s;
+ inblock = b;
+ return a;
+ }
+ function idValue() {
+ return this;
+ }
+ function countMember(m) {
+ if (membersOnly && membersOnly[m] !== true) {
+ warning("Unexpected /*member '{a}'.", nexttoken, m);
+ }
+ if (typeof member[m] === 'number') {
+ member[m] += 1;
+ } else {
+ member[m] = 1;
+ }
+ }
+ function note_implied(token) {
+ var name = token.value,
+ line = token.line + 1,
+ a = implied[name];
+ if (!a) {
+ a = [line];
+ implied[name] = a;
+ } else if (a[a.length - 1] !== line) {
+ a.push(line);
+ }
+ }
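+ // CSS value parsers used for style sheets and style attributes: names, numbers, strings, colors, lengths, line heights, widths, margins, attr(), counters, shapes and urls.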
+ function cssName() {
+ if (nexttoken.identifier) {
+ advance();
+ return true;
+ }
+ }
+ function cssNumber() {
+ if (nexttoken.id === '-') {
+ advance('-');
+ advance('(number)');
+ }
+ if (nexttoken.type === '(number)') {
+ advance();
+ return true;
+ }
+ }
+ function cssString() {
+ if (nexttoken.type === '(string)') {
+ advance();
+ return true;
+ }
+ }
+ function cssColor() {
+ var i, number;
+ if (nexttoken.identifier) {
+ if (nexttoken.value === 'rgb') {
+ advance();
+ advance('(');
+ for (i = 0; i < 3; i += 1) {
+ number = nexttoken.value;
+ if (nexttoken.type !== '(number)' || number < 0) {
+ warning("Expected a positive number and instead saw '{a}'", nexttoken, number);
+ advance();
+ } else {
+ advance();
+ if (nexttoken.id === '%') {
+ advance('%');
+ if (number > 100) {
+ warning("Expected a percentage and instead saw '{a}'", token, number);
+ }
+ } else {
+ if (number > 255) {
+ warning("Expected a small number and instead saw '{a}'", token, number);
+ }
+ }
+ }
+ }
+ advance(')');
+ return true;
+ } else if (cssColorData[nexttoken.value] === true) {
+ advance();
+ return true;
+ }
+ } else if (nexttoken.type === '(color)') {
+ advance();
+ return true;
+ }
+ return false;
+ }
+ function cssLength() {
+ if (nexttoken.id === '-') {
+ advance('-');
+ adjacent();
+ }
+ if (nexttoken.type === '(number)') {
+ advance();
+ if (nexttoken.type !== '(string)' && cssLengthData[nexttoken.value] === true) {
+ adjacent();
+ advance();
+ } else if (+token.value !== 0) {
+ warning("Expected a linear unit and instead saw '{a}'.", nexttoken, nexttoken.value);
+ }
+ return true;
+ }
+ return false;
+ }
+ function cssLineHeight() {
+ if (nexttoken.id === '-') {
+ advance('-');
+ adjacent();
+ }
+ if (nexttoken.type === '(number)') {
+ advance();
+ if (nexttoken.type !== '(string)' && cssLengthData[nexttoken.value] === true) {
+ adjacent();
+ advance();
+ }
+ return true;
+ }
+ return false;
+ }
+ function cssWidth() {
+ if (nexttoken.identifier) {
+ switch (nexttoken.value) {
+ case 'thin':
+ case 'medium':
+ case 'thick':
+ advance();
+ return true;
+ }
+ } else {
+ return cssLength();
+ }
+ }
+ function cssMargin() {
+ if (nexttoken.identifier) {
+ if (nexttoken.value === 'auto') {
+ advance();
+ return true;
+ }
+ } else {
+ return cssLength();
+ }
+ }
+ function cssAttr() {
+ if (nexttoken.identifier && nexttoken.value === 'attr') {
+ advance();
+ advance('(');
+ if (!nexttoken.identifier) {
+ warning("Expected a name and instead saw '{a}'.", nexttoken, nexttoken.value);
+ }
+ advance();
+ advance(')');
+ return true;
+ }
+ return false;
+ }
+ function cssCommaList() {
+ while (nexttoken.id !== ';') {
+ if (!cssName() && !cssString()) {
+ warning("Expected a name and instead saw '{a}'.", nexttoken, nexttoken.value);
+ }
+ if (nexttoken.id !== ',') {
+ return true;
+ }
+ advance(',');
+ }
+ }
+ function cssCounter() {
+ if (nexttoken.identifier && nexttoken.value === 'counter') {
+ advance();
+ advance('(');
+ if (!nexttoken.identifier) {}
+ advance();
+ if (nexttoken.id === ',') {
+ advance(',');
+ if (nexttoken.type !== '(string)') {
+ warning("Expected a string and instead saw '{a}'.", nexttoken, nexttoken.value);
+ }
+ advance();
+ }
+ advance(')');
+ return true;
+ }
+ if (nexttoken.identifier && nexttoken.value === 'counters') {
+ advance();
+ advance('(');
+ if (!nexttoken.identifier) {
+ warning("Expected a name and instead saw '{a}'.", nexttoken, nexttoken.value);
+ }
+ advance();
+ if (nexttoken.id === ',') {
+ advance(',');
+ if (nexttoken.type !== '(string)') {
+ warning("Expected a string and instead saw '{a}'.", nexttoken, nexttoken.value);
+ }
+ advance();
+ }
+ if (nexttoken.id === ',') {
+ advance(',');
+ if (nexttoken.type !== '(string)') {
+ warning("Expected a string and instead saw '{a}'.", nexttoken, nexttoken.value);
+ }
+ advance();
+ }
+ advance(')');
+ return true;
+ }
+ return false;
+ }
+ function cssShape() {
+ var i;
+ if (nexttoken.identifier && nexttoken.value === 'rect') {
+ advance();
+ advance('(');
+ for (i = 0; i < 4; i += 1) {
+ if (!cssLength()) {
+ warning("Expected a number and instead saw '{a}'.", nexttoken, nexttoken.value);
+ break;
+ }
+ }
+ advance(')');
+ return true;
+ }
+ return false;
+ }
+ function cssUrl() {
+ var url;
+ if (nexttoken.identifier && nexttoken.value === 'url') {
+ nexttoken = lex.range('(', ')');
+ url = nexttoken.value;
+ advance();
+ if (option.safe && ux.test(url)) {
+ error("ADsafe URL violation.");
+ }
+ urls.push(url);
+ return true;
+ }
+ return false;
+ }
+ cssAny = [cssUrl,
+ function() {
+ for (;;) {
+ if (nexttoken.identifier) {
+ switch (nexttoken.value.toLowerCase()) {
+ case 'url':
+ cssUrl();
+ break;
+ case 'expression':
+ warning("Unexpected expression '{a}'.", nexttoken, nexttoken.value);
+ advance();
+ break;
+ default:
+ advance();
+ }
+ } else {
+ if (nexttoken.id === ';' || nexttoken.id === '!' || nexttoken.id === '(end)' || nexttoken.id === '}') {
+ return true;
+ }
+ advance();
+ }
+ }
+ }];
+ cssBorderStyle = ['none', 'hidden', 'dotted', 'dashed', 'solid', 'double', 'ridge', 'inset', 'outset'];
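+ // cssAttributeData maps each CSS property to the value forms it accepts: keyword strings, parser functions, repeat counts, nested alternatives, and true to mark the start of a shorthand's sub-property list.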
+ cssAttributeData = {
+ background: [true, 'background-attachment', 'background-color', 'background-image', 'background-position', 'background-repeat'],
+ 'background-attachment': ['scroll', 'fixed'],
+ 'background-color': ['transparent', cssColor],
+ 'background-image': ['none', cssUrl],
+ 'background-position': [2, [cssLength, 'top', 'bottom', 'left', 'right', 'center']],
+ 'background-repeat': ['repeat', 'repeat-x', 'repeat-y', 'no-repeat'],
+ 'border': [true, 'border-color', 'border-style', 'border-width'],
+ 'border-bottom': [true, 'border-bottom-color', 'border-bottom-style', 'border-bottom-width'],
+ 'border-bottom-color': cssColor,
+ 'border-bottom-style': cssBorderStyle,
+ 'border-bottom-width': cssWidth,
+ 'border-collapse': ['collapse', 'separate'],
+ 'border-color': ['transparent', 4, cssColor],
+ 'border-left': [true, 'border-left-color', 'border-left-style', 'border-left-width'],
+ 'border-left-color': cssColor,
+ 'border-left-style': cssBorderStyle,
+ 'border-left-width': cssWidth,
+ 'border-right': [true, 'border-right-color', 'border-right-style', 'border-right-width'],
+ 'border-right-color': cssColor,
+ 'border-right-style': cssBorderStyle,
+ 'border-right-width': cssWidth,
+ 'border-spacing': [2, cssLength],
+ 'border-style': [4, cssBorderStyle],
+ 'border-top': [true, 'border-top-color', 'border-top-style', 'border-top-width'],
+ 'border-top-color': cssColor,
+ 'border-top-style': cssBorderStyle,
+ 'border-top-width': cssWidth,
+ 'border-width': [4, cssWidth],
+ bottom: [cssLength, 'auto'],
+ 'caption-side': ['bottom', 'left', 'right', 'top'],
+ clear: ['both', 'left', 'none', 'right'],
+ clip: [cssShape, 'auto'],
+ color: cssColor,
+ content: ['open-quote', 'close-quote', 'no-open-quote', 'no-close-quote', cssString, cssUrl, cssCounter, cssAttr],
+ 'counter-increment': [cssName, 'none'],
+ 'counter-reset': [cssName, 'none'],
+ cursor: [cssUrl, 'auto', 'crosshair', 'default', 'e-resize', 'help', 'move', 'n-resize', 'ne-resize', 'nw-resize', 'pointer', 's-resize', 'se-resize', 'sw-resize', 'w-resize', 'text', 'wait'],
+ direction: ['ltr', 'rtl'],
+ display: ['block', 'compact', 'inline', 'inline-block', 'inline-table', 'list-item', 'marker', 'none', 'run-in', 'table', 'table-caption', 'table-column', 'table-column-group', 'table-footer-group', 'table-header-group', 'table-row', 'table-row-group'],
+ 'empty-cells': ['show', 'hide'],
+ 'float': ['left', 'none', 'right'],
+ font: ['caption', 'icon', 'menu', 'message-box', 'small-caption', 'status-bar', true, 'font-size', 'font-style', 'font-weight', 'font-family'],
+ 'font-family': cssCommaList,
+ 'font-size': ['xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large', 'larger', 'smaller', cssLength],
+ 'font-size-adjust': ['none', cssNumber],
+ 'font-stretch': ['normal', 'wider', 'narrower', 'ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed', 'semi-expanded', 'expanded', 'extra-expanded'],
+ 'font-style': ['normal', 'italic', 'oblique'],
+ 'font-variant': ['normal', 'small-caps'],
+ 'font-weight': ['normal', 'bold', 'bolder', 'lighter', cssNumber],
+ height: [cssLength, 'auto'],
+ left: [cssLength, 'auto'],
+ 'letter-spacing': ['normal', cssLength],
+ 'line-height': ['normal', cssLineHeight],
+ 'list-style': [true, 'list-style-image', 'list-style-position', 'list-style-type'],
+ 'list-style-image': ['none', cssUrl],
+ 'list-style-position': ['inside', 'outside'],
+ 'list-style-type': ['circle', 'disc', 'square', 'decimal', 'decimal-leading-zero', 'lower-roman', 'upper-roman', 'lower-greek', 'lower-alpha', 'lower-latin', 'upper-alpha', 'upper-latin', 'hebrew', 'katakana', 'hiragana-iroha', 'katakana-oroha', 'none'],
+ margin: [4, cssMargin],
+ 'margin-bottom': cssMargin,
+ 'margin-left': cssMargin,
+ 'margin-right': cssMargin,
+ 'margin-top': cssMargin,
+ 'marker-offset': [cssLength, 'auto'],
+ 'max-height': [cssLength, 'none'],
+ 'max-width': [cssLength, 'none'],
+ 'min-height': cssLength,
+ 'min-width': cssLength,
+ opacity: cssNumber,
+ outline: [true, 'outline-color', 'outline-style', 'outline-width'],
+ 'outline-color': ['invert', cssColor],
+ 'outline-style': ['dashed', 'dotted', 'double', 'groove', 'inset', 'none', 'outset', 'ridge', 'solid'],
+ 'outline-width': cssWidth,
+ overflow: ['auto', 'hidden', 'scroll', 'visible'],
+ padding: [4, cssLength],
+ 'padding-bottom': cssLength,
+ 'padding-left': cssLength,
+ 'padding-right': cssLength,
+ 'padding-top': cssLength,
+ position: ['absolute', 'fixed', 'relative', 'static'],
+ quotes: [8, cssString],
+ right: [cssLength, 'auto'],
+ 'table-layout': ['auto', 'fixed'],
+ 'text-align': ['center', 'justify', 'left', 'right'],
+ 'text-decoration': ['none', 'underline', 'overline', 'line-through', 'blink'],
+ 'text-indent': cssLength,
+ 'text-shadow': ['none', 4, [cssColor, cssLength]],
+ 'text-transform': ['capitalize', 'uppercase', 'lowercase', 'none'],
+ top: [cssLength, 'auto'],
+ 'unicode-bidi': ['normal', 'embed', 'bidi-override'],
+ 'vertical-align': ['baseline', 'bottom', 'sub', 'super', 'top', 'text-top', 'middle', 'text-bottom', cssLength],
+ visibility: ['visible', 'hidden', 'collapse'],
+ 'white-space': ['normal', 'pre', 'nowrap'],
+ width: [cssLength, 'auto'],
+ 'word-spacing': ['normal', cssLength],
+ 'z-index': ['auto', cssNumber]
+ };
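+ // styleAttribute, styleValue, substyle, stylePattern and styles parse CSS rules: selectors and at-rules, property names, and their values (including !important).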
+ function styleAttribute() {
+ var v;
+ while (nexttoken.id === '*' || nexttoken.id === '#' || nexttoken.value === '_') {
+ if (!option.css) {
+ warning("Unexpected '{a}'.", nexttoken, nexttoken.value);
+ }
+ advance();
+ }
+ if (nexttoken.id === '-') {
+ if (!option.css) {
+ warning("Unexpected '{a}'.", nexttoken, nexttoken.value);
+ }
+ advance('-');
+ if (!nexttoken.identifier) {
+ warning("Expected a non-standard style attribute and instead saw '{a}'.", nexttoken, nexttoken.value);
+ }
+ advance();
+ return cssAny;
+ } else {
+ if (!nexttoken.identifier) {
+ warning("Excepted a style attribute, and instead saw '{a}'.", nexttoken, nexttoken.value);
+ } else {
+ if (cssAttributeData.hasOwnProperty(nexttoken.value)) {
+ v = cssAttributeData[nexttoken.value];
+ } else {
+ v = cssAny;
+ if (!option.css) {
+ warning("Unrecognized style attribute '{a}'.", nexttoken, nexttoken.value);
+ }
+ }
+ }
+ advance();
+ return v;
+ }
+ }
+ function styleValue(v) {
+ var i = 0,
+ n, once, match, round, start = 0,
+ vi;
+ switch (typeof v) {
+ case 'function':
+ return v();
+ case 'string':
+ if (nexttoken.identifier && nexttoken.value === v) {
+ advance();
+ return true;
+ }
+ return false;
+ }
+ for (;;) {
+ if (i >= v.length) {
+ return false;
+ }
+ vi = v[i];
+ i += 1;
+ if (vi === true) {
+ break;
+ } else if (typeof vi === 'number') {
+ n = vi;
+ vi = v[i];
+ i += 1;
+ } else {
+ n = 1;
+ }
+ match = false;
+ while (n > 0) {
+ if (styleValue(vi)) {
+ match = true;
+ n -= 1;
+ } else {
+ break;
+ }
+ }
+ if (match) {
+ return true;
+ }
+ }
+ start = i;
+ once = [];
+ for (;;) {
+ round = false;
+ for (i = start; i < v.length; i += 1) {
+ if (!once[i]) {
+ if (styleValue(cssAttributeData[v[i]])) {
+ match = true;
+ round = true;
+ once[i] = true;
+ break;
+ }
+ }
+ }
+ if (!round) {
+ return match;
+ }
+ }
+ }
+ function substyle() {
+ var v;
+ for (;;) {
+ if (nexttoken.id === '}' || nexttoken.id === '(end)' || xquote && nexttoken.id === xquote) {
+ return;
+ }
+ while (nexttoken.id === ';') {
+ warning("Misplaced ';'.");
+ advance(';');
+ }
+ v = styleAttribute();
+ advance(':');
+ if (nexttoken.identifier && nexttoken.value === 'inherit') {
+ advance();
+ } else {
+ styleValue(v);
+ }
+ while (nexttoken.id !== ';' && nexttoken.id !== '!' && nexttoken.id !== '}' && nexttoken.id !== '(end)' && nexttoken.id !== xquote) {
+ warning("Unexpected token '{a}'.", nexttoken, nexttoken.value);
+ advance();
+ }
+ if (nexttoken.id === '!') {
+ advance('!');
+ adjacent();
+ if (nexttoken.identifier && nexttoken.value === 'important') {
+ advance();
+ } else {
+ warning("Expected '{a}' and instead saw '{b}'.", nexttoken, 'important', nexttoken.value);
+ }
+ }
+ if (nexttoken.id === '}' || nexttoken.id === xquote) {
+ warning("Missing '{a}'.", nexttoken, ';');
+ } else {
+ advance(';');
+ }
+ }
+ }
+ function stylePattern() {
+ var name;
+ if (nexttoken.id === '{') {
+ warning("Expected a style pattern, and instead saw '{a}'.", nexttoken, nexttoken.id);
+ } else if (nexttoken.id === '@') {
+ advance('@');
+ name = nexttoken.value;
+ if (nexttoken.identifier && atrule[name] === true) {
+ advance();
+ return name;
+ }
+ warning("Expected an at-rule, and instead saw @{a}.", nexttoken, name);
+ }
+ for (;;) {
+ if (nexttoken.identifier) {
+ if (!htmltag.hasOwnProperty(nexttoken.value)) {
+ warning("Expected a tagName, and instead saw {a}.", nexttoken, nexttoken.value);
+ }
+ advance();
+ } else {
+ switch (nexttoken.id) {
+ case '>':
+ case '+':
+ advance();
+ if (!nexttoken.identifier || !htmltag.hasOwnProperty(nexttoken.value)) {
+ warning("Expected a tagName, and instead saw {a}.", nexttoken, nexttoken.value);
+ }
+ advance();
+ break;
+ case ':':
+ advance(':');
+ if (pseudorule[nexttoken.value] !== true) {
+ warning("Expected a pseudo, and instead saw :{a}.", nexttoken, nexttoken.value);
+ }
+ advance();
+ if (nexttoken.value === 'lang') {
+ advance('(');
+ if (!nexttoken.identifier) {
+ warning("Expected a lang code, and instead saw :{a}.", nexttoken, nexttoken.value);
+ }
+ advance(')');
+ }
+ break;
+ case '#':
+ advance('#');
+ if (!nexttoken.identifier) {
+ warning("Expected an id, and instead saw #{a}.", nexttoken, nexttoken.value);
+ }
+ advance();
+ break;
+ case '*':
+ advance('*');
+ break;
+ case '.':
+ advance('.');
+ if (!nexttoken.identifier) {
+ warning("Expected a class, and instead saw #.{a}.", nexttoken, nexttoken.value);
+ }
+ advance();
+ break;
+ case '[':
+ advance('[');
+ if (!nexttoken.identifier) {
+ warning("Expected an attribute, and instead saw [{a}].", nexttoken, nexttoken.value);
+ }
+ advance();
+ if (nexttoken.id === '=' || nexttoken.id === '~=' || nexttoken.id === '|=') {
+ advance();
+ if (nexttoken.type !== '(string)') {
+ warning("Expected a string, and instead saw {a}.", nexttoken, nexttoken.value);
+ }
+ advance();
+ }
+ advance(']');
+ break;
+ default:
+ error("Expected a CSS selector, and instead saw {a}.", nexttoken, nexttoken.value);
+ }
+ }
+ if (nexttoken.id === '</' || nexttoken.id === '{' || nexttoken.id === '(end)') {
+ return '';
+ }
+ if (nexttoken.id === ',') {
+ advance(',');
+ }
+ }
+ }
+ function styles() {
+ while (nexttoken.id !== '</' && nexttoken.id !== '(end)') {
+ stylePattern();
+ xmode = 'styleproperty';
+ if (nexttoken.id === ';') {
+ advance(';');
+ } else {
+ advance('{');
+ substyle();
+ xmode = 'style';
+ advance('}');
+ }
+ }
+ }
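+ // HTML checking: doBegin validates the root element, doAttribute checks ids, urls and event-handler attributes, doTag validates tag nesting and switches modes for embedded script and style content, and html() drives the whole scan.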
+ function doBegin(n) {
+ if (n !== 'html' && !option.fragment) {
+ if (n === 'div' && option.adsafe) {
+ error("ADSAFE: Use the fragment option.");
+ } else {
+ error("Expected '{a}' and instead saw '{b}'.", token, 'html', n);
+ }
+ }
+ if (option.adsafe) {
+ if (n === 'html') {
+ error("Currently, ADsafe does not operate on whole HTML documents. It operates on <div> fragments and .js files.", token);
+ }
+ if (option.fragment) {
+ if (n !== 'div') {
+ error("ADsafe violation: Wrap the widget in a div.", token);
+ }
+ } else {
+ error("Use the fragment option.", token);
+ }
+ }
+ option.browser = true;
+ assume();
+ }
+ function doAttribute(n, a, v) {
+ var u;
+ if (a === 'id') {
+ u = typeof v === 'string' ? v.toUpperCase() : '';
+ if (ids[u] === true) {
+ warning("Duplicate id='{a}'.", nexttoken, v);
+ }
+ ids[u] = true;
+ if (option.adsafe) {
+ if (adsafe_id) {
+ if (v.slice(0, adsafe_id.length) !== adsafe_id) {
+ warning("ADsafe violation: An id must have a '{a}' prefix", nexttoken, adsafe_id);
+ } else if (!/^[A-Z]+_[A-Z]+$/.test(v)) {
+ warning("ADSAFE violation: bad id.");
+ }
+ } else {
+ adsafe_id = v;
+ if (!/^[A-Z]+_$/.test(v)) {
+ warning("ADSAFE violation: bad id.");
+ }
+ }
+ }
+ } else if (a === 'href' || a === 'background' || a === 'content' || a === 'data' || a.indexOf('src') >= 0 || a.indexOf('url') >= 0) {
+ if (option.safe && ux.test(v)) {
+ error("ADsafe URL violation.");
+ }
+ urls.push(v);
+ } else if (a === 'for') {
+ if (option.adsafe) {
+ if (adsafe_id) {
+ if (v.slice(0, adsafe_id.length) !== adsafe_id) {
+ warning("ADsafe violation: An id must have a '{a}' prefix", nexttoken, adsafe_id);
+ } else if (!/^[A-Z]+_[A-Z]+$/.test(v)) {
+ warning("ADSAFE violation: bad id.");
+ }
+ } else {
+ warning("ADSAFE violation: bad id.");
+ }
+ }
+ } else if (a === 'name') {
+ if (option.adsafe && v.indexOf('_') >= 0) {
+ warning("ADsafe name violation.");
+ }
+ }
+ }
+ function doTag(n, a) {
+ var i, t = htmltag[n],
+ x;
+ src = false;
+ if (!t) {
+ error("Unrecognized tag '<{a}>'.", nexttoken, n === n.toLowerCase() ? n: n + ' (capitalization error)');
+ }
+ if (stack.length > 0) {
+ if (n === 'html') {
+ error("Too many <html> tags.", token);
+ }
+ x = t.parent;
+ if (x) {
+ if (x.indexOf(' ' + stack[stack.length - 1].name + ' ') < 0) {
+ error("A '<{a}>' must be within '<{b}>'.", token, n, x);
+ }
+ } else if (!option.adsafe || !option.fragment) {
+ i = stack.length;
+ do {
+ if (i <= 0) {
+ error("A '<{a}>' must be within '<{b}>'.", token, n, 'body');
+ }
+ i -= 1;
+ } while (stack[i].name !== 'body');
+ }
+ }
+ switch (n) {
+ case 'div':
+ if (option.adsafe && stack.length === 1 && !adsafe_id) {
+ warning("ADSAFE violation: missing ID_.");
+ }
+ break;
+ case 'script':
+ xmode = 'script';
+ advance('>');
+ indent = nexttoken.from;
+ if (a.lang) {
+ warning("lang is deprecated.", token);
+ }
+ if (option.adsafe && stack.length !== 1) {
+ warning("ADsafe script placement violation.", token);
+ }
+ if (a.src) {
+ if (option.adsafe && (!adsafe_may || !approved[a.src])) {
+ warning("ADsafe unapproved script source.", token);
+ }
+ if (a.type) {
+ warning("type is unnecessary.", token);
+ }
+ } else {
+ if (adsafe_went) {
+ error("ADsafe script violation.", token);
+ }
+ statements('script');
+ }
+ xmode = 'html';
+ advance('</');
+ if (!nexttoken.identifier && nexttoken.value !== 'script') {
+ warning("Expected '{a}' and instead saw '{b}'.", nexttoken, 'script', nexttoken.value);
+ }
+ advance();
+ xmode = 'outer';
+ break;
+ case 'style':
+ xmode = 'style';
+ advance('>');
+ styles();
+ xmode = 'html';
+ advance('</');
+ if (!nexttoken.identifier && nexttoken.value !== 'style') {
+ warning("Expected '{a}' and instead saw '{b}'.", nexttoken, 'style', nexttoken.value);
+ }
+ advance();
+ xmode = 'outer';
+ break;
+ case 'input':
+ switch (a.type) {
+ case 'radio':
+ case 'checkbox':
+ case 'text':
+ case 'button':
+ case 'file':
+ case 'reset':
+ case 'submit':
+ case 'password':
+ case 'hidden':
+ case 'image':
+ break;
+ default:
+ warning("Bad input type.");
+ }
+ if (option.adsafe && a.autocomplete !== 'off') {
+ warning("ADsafe autocomplete violation.");
+ }
+ break;
+ case 'applet':
+ case 'body':
+ case 'embed':
+ case 'frame':
+ case 'frameset':
+ case 'head':
+ case 'iframe':
+ case 'img':
+ case 'object':
+ case 'param':
+ if (option.adsafe) {
+ warning("ADsafe violation: Disallowed tag: " + n);
+ }
+ break;
+ }
+ }
+ function closetag(n) {
+ return '</' + n + '>';
+ }
+ function html() {
+ var a, attributes, e, n, q, t, v, wmode;
+ xmode = 'html';
+ xquote = '';
+ stack = null;
+ for (;;) {
+ switch (nexttoken.value) {
+ case '<':
+ xmode = 'html';
+ advance('<');
+ attributes = {};
+ t = nexttoken;
+ if (!t.identifier) {
+ warning("Bad identifier {a}.", t, t.value);
+ }
+ n = t.value;
+ if (option.cap) {
+ n = n.toLowerCase();
+ }
+ t.name = n;
+ advance();
+ if (!stack) {
+ stack = [];
+ doBegin(n);
+ }
+ v = htmltag[n];
+ if (typeof v !== 'object') {
+ error("Unrecognized tag '<{a}>'.", t, n);
+ }
+ e = v.empty;
+ t.type = n;
+ for (;;) {
+ if (nexttoken.id === '/') {
+ advance('/');
+ if (nexttoken.id !== '>') {
+ warning("Expected '{a}' and instead saw '{b}'.", nexttoken, '>', nexttoken.value);
+ }
+ break;
+ }
+ if (nexttoken.id && nexttoken.id.substr(0, 1) === '>') {
+ break;
+ }
+ if (!nexttoken.identifier) {
+ if (nexttoken.id === '(end)' || nexttoken.id === '(error)') {
+ error("Missing '>'.", nexttoken);
+ }
+ warning("Bad identifier.");
+ }
+ a = nexttoken.value;
+ advance();
+ if (!option.cap && a !== a.toLowerCase()) {
+ warning("Attribute '{a}' not all lower case.", nexttoken, a);
+ }
+ a = a.toLowerCase();
+ xquote = '';
+ if (attributes.hasOwnProperty(a)) {
+ warning("Attribute '{a}' repeated.", nexttoken, a);
+ }
+ if (a.slice(0, 2) === 'on') {
+ if (!option.on) {
+ warning("Avoid HTML event handlers.");
+ }
+ xmode = 'scriptstring';
+ advance('=');
+ q = nexttoken.id;
+ if (q !== '"' && q !== "'") {
+ error("Missing quote.");
+ }
+ xquote = q;
+ wmode = option.white;
+ option.white = false;
+ advance(q);
+ statements('on');
+ option.white = wmode;
+ if (nexttoken.id !== q) {
+ error("Missing close quote on script attribute.");
+ }
+ xmode = 'html';
+ xquote = '';
+ advance(q);
+ v = false;
+ } else if (a === 'style') {
+ xmode = 'scriptstring';
+ advance('=');
+ q = nexttoken.id;
+ if (q !== '"' && q !== "'") {
+ error("Missing quote.");
+ }
+ xmode = 'styleproperty';
+ xquote = q;
+ advance(q);
+ substyle();
+ xmode = 'html';
+ xquote = '';
+ advance(q);
+ v = false;
+ } else {
+ if (nexttoken.id === '=') {
+ advance('=');
+ v = nexttoken.value;
+ if (!nexttoken.identifier && nexttoken.id !== '"' && nexttoken.id !== '\'' && nexttoken.type !== '(string)' && nexttoken.type !== '(number)' && nexttoken.type !== '(color)') {
+ warning("Expected an attribute value and instead saw '{a}'.", token, a);
+ }
+ advance();
+ } else {
+ v = true;
+ }
+ }
+ attributes[a] = v;
+ doAttribute(n, a, v);
+ }
+ doTag(n, attributes);
+ if (!e) {
+ stack.push(t);
+ }
+ xmode = 'outer';
+ advance('>');
+ break;
+ case '</':
+ xmode = 'html';
+ advance('</');
+ if (!nexttoken.identifier) {
+ warning("Bad identifier.");
+ }
+ n = nexttoken.value;
+ if (option.cap) {
+ n = n.toLowerCase();
+ }
+ advance();
+ if (!stack) {
+ error("Unexpected '{a}'.", nexttoken, closetag(n));
+ }
+ t = stack.pop();
+ if (!t) {
+ error("Unexpected '{a}'.", nexttoken, closetag(n));
+ }
+ if (t.name !== n) {
+ error("Expected '{a}' and instead saw '{b}'.", nexttoken, closetag(t.name), closetag(n));
+ }
+ if (nexttoken.id !== '>') {
+ error("Missing '{a}'.", nexttoken, '>');
+ }
+ xmode = 'outer';
+ advance('>');
+ break;
+ case '<!':
+ if (option.safe) {
+ error("ADsafe HTML violation.");
+ }
+ xmode = 'outer';
+ v = false;
+ for (;;) {
+ advance();
+ if (nexttoken.id === '>') {
+ break;
+ }
+ if (nexttoken.id === '<' || nexttoken.id === '(end)') {
+ error("Missing '{a}'.", token, '>');
+ }
+ if (nexttoken.id === '--') {
+ v = !v;
+ }
+ }
+ if (v) {
+ warning("Misshapen HTML comment.");
+ }
+ xmode = 'html';
+ advance('>');
+ break;
+ case '(end)':
+ return;
+ default:
+ if (nexttoken.id === '(end)') {
+ error("Missing '{a}'.", nexttoken, '</html>');
+ } else if (nexttoken.id !== '--' && nexttoken.id !== '#') {
+ error("Unexpected '{a}'.", nexttoken, nexttoken.value);
+ } else {
+ advance();
+ }
+ }
+ if (stack && stack.length === 0) {
+ break;
+ }
+ }
+ if (nexttoken.id !== '(end)') {
+ error("Unexpected material after the end.");
+ }
+ }
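+ // Register the grammar: literal types, delimiters, reserved words and operators, each with its binding power and nud/led behavior.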
+ type('(number)', idValue);
+ type('(string)', idValue);
+ syntax['(identifier)'] = {
+ type: '(identifier)',
+ lbp: 0,
+ identifier: true,
+ nud: function() {
+ var v = this.value,
+ s = scope[v];
+ if (s && (s === funct || s === funct['(global)'])) {
+ if (!funct['(global)']) {
+ switch (funct[v]) {
+ case 'unused':
+ funct[v] = 'var';
+ break;
+ case 'label':
+ warning("'{a}' is a statement label.", token, v);
+ break;
+ }
+ }
+ } else if (funct['(global)']) {
+ if (option.undef) {
+ warning("'{a}' is undefined.", token, v);
+ }
+ note_implied(token);
+ } else {
+ switch (funct[v]) {
+ case 'closure':
+ case 'function':
+ case 'var':
+ case 'unused':
+ warning("'{a}' used out of scope.", token, v);
+ break;
+ case 'label':
+ warning("'{a}' is a statement label.", token, v);
+ break;
+ case 'outer':
+ case true:
+ break;
+ default:
+ if (s === true) {
+ funct[v] = true;
+ } else if (typeof s !== 'object') {
+ if (option.undef) {
+ warning("'{a}' is undefined.", token, v);
+ } else {
+ funct[v] = true;
+ }
+ note_implied(token);
+ } else {
+ switch (s[v]) {
+ case 'function':
+ case 'var':
+ case 'unused':
+ s[v] = 'closure';
+ funct[v] = 'outer';
+ break;
+ case 'closure':
+ case 'parameter':
+ funct[v] = 'outer';
+ break;
+ case 'label':
+ warning("'{a}' is a statement label.", token, v);
+ }
+ }
+ }
+ }
+ return this;
+ },
+ led: function() {
+ error("Expected an operator and instead saw '{a}'.", nexttoken, nexttoken.value);
+ }
+ };
+ type('(regexp)',
+ function() {
+ return this;
+ });
+ delim('(endline)');
+ delim('(begin)');
+ delim('(end)').reach = true;
+ delim('</').reach = true;
+ delim('<!');
+ delim('(error)').reach = true;
+ delim('}').reach = true;
+ delim(')');
+ delim(']');
+ delim('"').reach = true;
+ delim("'").reach = true;
+ delim(';');
+ delim(':').reach = true;
+ delim(',');
+ delim('#');
+ delim('@');
+ reserve('else');
+ reserve('case').reach = true;
+ reserve('catch');
+ reserve('default').reach = true;
+ reserve('finally');
+ reservevar('arguments');
+ reservevar('eval');
+ reservevar('false');
+ reservevar('Infinity');
+ reservevar('NaN');
+ reservevar('null');
+ reservevar('this');
+ reservevar('true');
+ reservevar('undefined');
+ assignop('=', 'assign', 20);
+ assignop('+=', 'assignadd', 20);
+ assignop('-=', 'assignsub', 20);
+ assignop('*=', 'assignmult', 20);
+ assignop('/=', 'assigndiv', 20).nud = function() {
+ error("A regular expression literal can be confused with '/='.");
+ };
+ assignop('%=', 'assignmod', 20);
+ bitwiseassignop('&=', 'assignbitand', 20);
+ bitwiseassignop('|=', 'assignbitor', 20);
+ bitwiseassignop('^=', 'assignbitxor', 20);
+ bitwiseassignop('<<=', 'assignshiftleft', 20);
+ bitwiseassignop('>>=', 'assignshiftright', 20);
+ bitwiseassignop('>>>=', 'assignshiftrightunsigned', 20);
+ infix('?',
+ function(left) {
+ parse(10);
+ advance(':');
+ parse(10);
+ },
+ 30);
+ infix('||', 'or', 40);
+ infix('&&', 'and', 50);
+ bitwise('|', 'bitor', 70);
+ bitwise('^', 'bitxor', 80);
+ bitwise('&', 'bitand', 90);
+ relation('==',
+ function(left, right) {
+ if (option.eqeqeq) {
+ warning("Expected '{a}' and instead saw '{b}'.", this, '===', '==');
+ } else if (isPoorRelation(left)) {
+ warning("Use '{a}' to compare with '{b}'.", this, '===', left.value);
+ } else if (isPoorRelation(right)) {
+ warning("Use '{a}' to compare with '{b}'.", this, '===', right.value);
+ }
+ return this;
+ });
+ relation('===');
+ relation('!=',
+ function(left, right) {
+ if (option.eqeqeq) {
+ warning("Expected '{a}' and instead saw '{b}'.", this, '!==', '!=');
+ } else if (isPoorRelation(left)) {
+ warning("Use '{a}' to compare with '{b}'.", this, '!==', left.value);
+ } else if (isPoorRelation(right)) {
+ warning("Use '{a}' to compare with '{b}'.", this, '!==', right.value);
+ }
+ return this;
+ });
+ relation('!==');
+ relation('<');
+ relation('>');
+ relation('<=');
+ relation('>=');
+ bitwise('<<', 'shiftleft', 120);
+ bitwise('>>', 'shiftright', 120);
+ bitwise('>>>', 'shiftrightunsigned', 120);
+ infix('in', 'in', 120);
+ infix('instanceof', 'instanceof', 120);
+ infix('+',
+ function(left) {
+ nonadjacent(prevtoken, token);
+ nonadjacent(token, nexttoken);
+ var right = parse(130);
+ if (left && right && left.id === '(string)' && right.id === '(string)') {
+ left.value += right.value;
+ left.character = right.character;
+ if (jx.test(left.value)) {
+ warning("JavaScript URL.", left);
+ }
+ return left;
+ }
+ this.left = left;
+ this.right = right;
+ return this;
+ },
+ 130);
+ prefix('+', 'num');
+ infix('-', 'sub', 130);
+ prefix('-', 'neg');
+ infix('*', 'mult', 140);
+ infix('/', 'div', 140);
+ infix('%', 'mod', 140);
+ suffix('++', 'postinc');
+ prefix('++', 'preinc');
+ syntax['++'].exps = true;
+ suffix('--', 'postdec');
+ prefix('--', 'predec');
+ syntax['--'].exps = true;
+ prefix('delete',
+ function() {
+ var p = parse(0);
+ if (p.id !== '.' && p.id !== '[') {
+ warning("Expected '{a}' and instead saw '{b}'.", nexttoken, '.', nexttoken.value);
+ }
+ }).exps = true;
+ prefix('~',
+ function() {
+ if (option.bitwise) {
+ warning("Unexpected '{a}'.", this, '~');
+ }
+ parse(150);
+ return this;
+ });
+ prefix('!', 'not');
+ prefix('typeof', 'typeof');
+ prefix('new',
+ function() {
+ var c = parse(155),
+ i;
+ if (c && c.id !== 'function') {
+ if (c.identifier) {
+ c['new'] = true;
+ switch (c.value) {
+ case 'Object':
+ warning("Use the object literal notation {}.", token);
+ break;
+ case 'Array':
+ warning("Use the array literal notation [].", token);
+ break;
+ case 'Number':
+ case 'String':
+ case 'Boolean':
+ case 'Math':
+ warning("Do not use the {a} function as a constructor.", token, c.value);
+ break;
+ case 'Function':
+ if (!option.evil) {
+ warning("The Function constructor is eval.");
+ }
+ break;
+ case 'Date':
+ case 'RegExp':
+ break;
+ default:
+ if (c.id !== 'function') {
+ i = c.value.substr(0, 1);
+ if (i < 'A' || i > 'Z') {
+ warning("A constructor name should start with an uppercase letter.", token);
+ }
+ }
+ }
+ } else {
+ if (c.id !== '.' && c.id !== '[' && c.id !== '(') {
+ warning("Bad constructor.", token);
+ }
+ }
+ } else {
+ warning("Weird construction. Delete 'new'.", this);
+ }
+ adjacent(token, nexttoken);
+ if (nexttoken.id !== '(') {
+ warning("Missing '()' invoking a constructor.");
+ }
+ this.first = c;
+ return this;
+ });
+ syntax['new'].exps = true;
+ infix('.',
+ function(left) {
+ adjacent(prevtoken, token);
+ var t = this,
+ m = identifier();
+ if (typeof m === 'string') {
+ countMember(m);
+ }
+ t.left = left;
+ t.right = m;
+ if (!option.evil && left && left.value === 'document' && (m === 'write' || m === 'writeln')) {
+ warning("document.write can be a form of eval.", left);
+ }
+ if (option.adsafe) {
+ if (left && left.value === 'ADSAFE') {
+ if (m === 'id' || m === 'lib') {
+ warning("ADsafe violation.", this);
+ } else if (m === 'go') {
+ if (xmode !== 'script') {
+ warning("ADsafe violation.", this);
+ } else if (adsafe_went || nexttoken.id !== '(' || peek(0).id !== '(string)' || peek(0).value !== adsafe_id || peek(1).id !== ',') {
+ error("ADsafe violation: go.", this);
+ }
+ adsafe_went = true;
+ adsafe_may = false;
+ }
+ }
+ for (;;) {
+ if (banned[m] === true) {
+ warning("ADsafe restricted word '{a}'.", token, m);
+ }
+ if (predefined[left.value] !== true || nexttoken.id === '(') {
+ break;
+ }
+ if (standard_member[m] === true) {
+ if (nexttoken.id === '.') {
+ warning("ADsafe violation.", this);
+ }
+ break;
+ }
+ if (nexttoken.id !== '.') {
+ warning("ADsafe violation.", this);
+ break;
+ }
+ advance('.');
+ token.left = t;
+ token.right = m;
+ t = token;
+ m = identifier();
+ if (typeof m === 'string') {
+ countMember(m);
+ }
+ }
+ }
+ return t;
+ },
+ 160);
+ infix('(',
+ function(left) {
+ adjacent(prevtoken, token);
+ nospace();
+ var n = 0,
+ p = [];
+ if (left) {
+ if (left.type === '(identifier)') {
+ if (left.value.match(/^[A-Z]([A-Z0-9_$]*[a-z][A-Za-z0-9_$]*)?$/)) {
+ if (left.value !== 'Number' && left.value !== 'String' && left.value !== 'Boolean' && left.value !== 'Date') {
+ if (left.value === 'Math') {
+ warning("Math is not a function.", left);
+ } else {
+ warning("Missing 'new' prefix when invoking a constructor.", left);
+ }
+ }
+ }
+ } else if (left.id === '.') {
+ if (option.safe && left.left.value === 'Math' && left.right === 'random') {
+ warning("ADsafe violation.", left);
+ }
+ }
+ }
+ if (nexttoken.id !== ')') {
+ for (;;) {
+ p[p.length] = parse(10);
+ n += 1;
+ if (nexttoken.id !== ',') {
+ break;
+ }
+ advance(',');
+ nonadjacent(token, nexttoken);
+ }
+ }
+ advance(')');
+ nospace(prevtoken, token);
+ if (typeof left === 'object') {
+ if (left.value === 'parseInt' && n === 1) {
+ warning("Missing radix parameter.", left);
+ }
+ if (!option.evil) {
+ if (left.value === 'eval' || left.value === 'Function' || left.value === 'execScript') {
+ warning("eval is evil.", left);
+ } else if (p[0] && p[0].id === '(string)' && (left.value === 'setTimeout' || left.value === 'setInterval')) {
+ warning("Implied eval is evil. Pass a function instead of a string.", left);
+ }
+ }
+ if (!left.identifier && left.id !== '.' && left.id !== '[' && left.id !== '(' && left.id !== '&&' && left.id !== '||' && left.id !== '?') {
+ warning("Bad invocation.", left);
+ }
+ }
+ this.left = left;
+ return this;
+ },
+ 155).exps = true;
+ prefix('(',
+ function() {
+ nospace();
+ var t = nexttoken,
+ v = parse(0);
+ advance(')', this);
+ nospace(prevtoken, token);
+ return v;
+ });
+ infix('[',
+ function(left) {
+ nospace();
+ var e = parse(0),
+ s;
+ if (e && e.type === '(string)') {
+ if (option.safe && banned[e.value] === true) {
+ warning("ADsafe restricted word '{a}'.", this, e.value);
+ }
+ countMember(e.value);
+ if (!option.sub && ix.test(e.value)) {
+ s = syntax[e.value];
+ if (!s || !s.reserved) {
+ warning("['{a}'] is better written in dot notation.", e, e.value);
+ }
+ }
+ } else if (!e || (e.type !== '(number)' && (e.id !== '+' || e.arity !== 'unary'))) {
+ if (option.safe) {
+ warning('ADsafe subscripting.');
+ }
+ }
+ advance(']', this);
+ nospace(prevtoken, token);
+ this.left = left;
+ this.right = e;
+ return this;
+ },
+ 160);
+ prefix('[',
+ function() {
+ if (nexttoken.id === ']') {
+ advance(']');
+ return;
+ }
+ var b = token.line !== nexttoken.line;
+ if (b) {
+ indent += option.indent;
+ if (nexttoken.from === indent + option.indent) {
+ indent += option.indent;
+ }
+ }
+ for (;;) {
+ if (b && token.line !== nexttoken.line) {
+ indentation();
+ }
+ parse(10);
+ if (nexttoken.id === ',') {
+ adjacent(token, nexttoken);
+ advance(',');
+ if (nexttoken.id === ',') {
+ warning("Extra comma.", token);
+ } else if (nexttoken.id === ']') {
+ warning("Extra comma.", token);
+ break;
+ }
+ nonadjacent(token, nexttoken);
+ } else {
+ if (b) {
+ indent -= option.indent;
+ indentation();
+ }
+ break;
+ }
+ }
+ advance(']', this);
+ return;
+ },
+ 160); (function(x) {
+ x.nud = function() {
+ var b, i, s;
+ if (nexttoken.id === '}') {
+ advance('}');
+ return;
+ }
+ b = token.line !== nexttoken.line;
+ if (b) {
+ indent += option.indent;
+ if (nexttoken.from === indent + option.indent) {
+ indent += option.indent;
+ }
+ }
+ for (;;) {
+ if (b) {
+ indentation();
+ }
+ i = optionalidentifier(true);
+ if (!i) {
+ if (nexttoken.id === '(string)') {
+ i = nexttoken.value;
+ if (ix.test(i)) {
+ s = syntax[i];
+ }
+ advance();
+ } else if (nexttoken.id === '(number)') {
+ i = nexttoken.value.toString();
+ advance();
+ } else {
+ error("Expected '{a}' and instead saw '{b}'.", nexttoken, '}', nexttoken.value);
+ }
+ }
+ countMember(i);
+ advance(':');
+ nonadjacent(token, nexttoken);
+ parse(10);
+ if (nexttoken.id === ',') {
+ adjacent(token, nexttoken);
+ advance(',');
+ if (nexttoken.id === ',' || nexttoken.id === '}') {
+ warning("Extra comma.", token);
+ }
+ nonadjacent(token, nexttoken);
+ } else {
+ if (b) {
+ indent -= option.indent;
+ indentation();
+ }
+ advance('}', this);
+ return;
+ }
+ }
+ };
+ x.fud = function() {
+ error("Expected to see a statement and instead saw a block.", token);
+ };
+ })(delim('{'));
+ function varstatement(prefix) {
+ if (funct['(onevar)'] && option.onevar) {
+ warning("Too many var statements.");
+ } else if (!funct['(global)']) {
+ funct['(onevar)'] = true;
+ }
+ for (;;) {
+ nonadjacent(token, nexttoken);
+ addlabel(identifier(), 'unused');
+ if (prefix) {
+ return;
+ }
+ if (nexttoken.id === '=') {
+ nonadjacent(token, nexttoken);
+ advance('=');
+ nonadjacent(token, nexttoken);
+ if (peek(0).id === '=') {
+ error("Variable {a} was not declared correctly.", nexttoken, nexttoken.value);
+ }
+ parse(20);
+ }
+ if (nexttoken.id !== ',') {
+ return;
+ }
+ adjacent(token, nexttoken);
+ advance(',');
+ nonadjacent(token, nexttoken);
+ }
+ }
+ stmt('var', varstatement);
+ stmt('new',
+ function() {
+ error("'new' should not be used as a statement.");
+ });
+ function functionparams() {
+ var i, t = nexttoken,
+ p = [];
+ advance('(');
+ nospace();
+ if (nexttoken.id === ')') {
+ advance(')');
+ nospace(prevtoken, token);
+ return;
+ }
+ for (;;) {
+ i = identifier();
+ p.push(i);
+ addlabel(i, 'parameter');
+ if (nexttoken.id === ',') {
+ advance(',');
+ nonadjacent(token, nexttoken);
+ } else {
+ advance(')', t);
+ nospace(prevtoken, token);
+ return p.join(', ');
+ }
+ }
+ }
+ function doFunction(i) {
+ var s = scope;
+ scope = Object.create(s);
+ funct = {
+ '(name)': i || '"' + anonname + '"',
+ '(line)': nexttoken.line + 1,
+ '(context)': funct,
+ '(breakage)': 0,
+ '(loopage)': 0,
+ '(scope)': scope
+ };
+ functions.push(funct);
+ if (i) {
+ addlabel(i, 'function');
+ }
+ funct['(params)'] = functionparams();
+ block(false);
+ scope = s;
+ funct = funct['(context)'];
+ }
+ blockstmt('function',
+ function() {
+ if (inblock) {
+ warning("Function statements cannot be placed in blocks. Use a function expression or move the statement to the top of the outer function.", token);
+ }
+ var i = identifier();
+ adjacent(token, nexttoken);
+ addlabel(i, 'unused');
+ doFunction(i);
+ if (nexttoken.id === '(' && nexttoken.line === token.line) {
+ error("Function statements are not invocable. Wrap the function expression in parens.");
+ }
+ });
+ prefix('function',
+ function() {
+ var i = optionalidentifier();
+ if (i) {
+ adjacent(token, nexttoken);
+ } else {
+ nonadjacent(token, nexttoken);
+ }
+ doFunction(i);
+ if (funct['(loopage)'] && nexttoken.id !== '(') {
+ warning("Be careful when making functions within a loop. Consider putting the function in a closure.");
+ }
+ return this;
+ });
+ blockstmt('if',
+ function() {
+ var t = nexttoken;
+ advance('(');
+ nonadjacent(this, t);
+ nospace();
+ parse(20);
+ if (nexttoken.id === '=') {
+ warning("Expected a conditional expression and instead saw an assignment.");
+ advance('=');
+ parse(20);
+ }
+ advance(')', t);
+ nospace(prevtoken, token);
+ block(true);
+ if (nexttoken.id === 'else') {
+ nonadjacent(token, nexttoken);
+ advance('else');
+ if (nexttoken.id === 'if' || nexttoken.id === 'switch') {
+ statement(true);
+ } else {
+ block(true);
+ }
+ }
+ return this;
+ });
+ blockstmt('try',
+ function() {
+ var b, e, s;
+ if (option.adsafe) {
+ warning("ADsafe try violation.", this);
+ }
+ block(false);
+ if (nexttoken.id === 'catch') {
+ advance('catch');
+ nonadjacent(token, nexttoken);
+ advance('(');
+ s = scope;
+ scope = Object.create(s);
+ e = nexttoken.value;
+ if (nexttoken.type !== '(identifier)') {
+ warning("Expected an identifier and instead saw '{a}'.", nexttoken, e);
+ } else {
+ addlabel(e, 'unused');
+ }
+ advance();
+ advance(')');
+ block(false);
+ b = true;
+ scope = s;
+ }
+ if (nexttoken.id === 'finally') {
+ advance('finally');
+ block(false);
+ return;
+ } else if (!b) {
+ error("Expected '{a}' and instead saw '{b}'.", nexttoken, 'catch', nexttoken.value);
+ }
+ });
+ blockstmt('while',
+ function() {
+ var t = nexttoken;
+ funct['(breakage)'] += 1;
+ funct['(loopage)'] += 1;
+ advance('(');
+ nonadjacent(this, t);
+ nospace();
+ parse(20);
+ if (nexttoken.id === '=') {
+ warning("Expected a conditional expression and instead saw an assignment.");
+ advance('=');
+ parse(20);
+ }
+ advance(')', t);
+ nospace(prevtoken, token);
+ block(true);
+ funct['(breakage)'] -= 1;
+ funct['(loopage)'] -= 1;
+ }).labelled = true;
+ reserve('with');
+ blockstmt('switch',
+ function() {
+ var t = nexttoken,
+ g = false;
+ funct['(breakage)'] += 1;
+ advance('(');
+ nonadjacent(this, t);
+ nospace();
+ this.condition = parse(20);
+ advance(')', t);
+ nospace(prevtoken, token);
+ nonadjacent(token, nexttoken);
+ t = nexttoken;
+ advance('{');
+ nonadjacent(token, nexttoken);
+ indent += option.indent;
+ this.cases = [];
+ for (;;) {
+ switch (nexttoken.id) {
+ case 'case':
+ switch (funct['(verb)']) {
+ case 'break':
+ case 'case':
+ case 'continue':
+ case 'return':
+ case 'switch':
+ case 'throw':
+ break;
+ default:
+ warning("Expected a 'break' statement before 'case'.", token);
+ }
+ indentation( - option.indent);
+ advance('case');
+ this.cases.push(parse(20));
+ g = true;
+ advance(':');
+ funct['(verb)'] = 'case';
+ break;
+ case 'default':
+ switch (funct['(verb)']) {
+ case 'break':
+ case 'continue':
+ case 'return':
+ case 'throw':
+ break;
+ default:
+ warning("Expected a 'break' statement before 'default'.", token);
+ }
+ indentation( - option.indent);
+ advance('default');
+ g = true;
+ advance(':');
+ break;
+ case '}':
+ indent -= option.indent;
+ indentation();
+ advance('}', t);
+ if (this.cases.length === 1 || this.condition.id === 'true' || this.condition.id === 'false') {
+ warning("This 'switch' should be an 'if'.", this);
+ }
+ funct['(breakage)'] -= 1;
+ funct['(verb)'] = undefined;
+ return;
+ case '(end)':
+ error("Missing '{a}'.", nexttoken, '}');
+ return;
+ default:
+ if (g) {
+ switch (token.id) {
+ case ',':
+ error("Each value should have its own case label.");
+ return;
+ case ':':
+ statements();
+ break;
+ default:
+ error("Missing ':' on a case clause.", token);
+ }
+ } else {
+ error("Expected '{a}' and instead saw '{b}'.", nexttoken, 'case', nexttoken.value);
+ }
+ }
+ }
+ }).labelled = true;
+ stmt('debugger',
+ function() {
+ if (!option.debug) {
+ warning("All 'debugger' statements should be removed.");
+ }
+ });
+ stmt('do',
+ function() {
+ funct['(breakage)'] += 1;
+ funct['(loopage)'] += 1;
+ block(true);
+ advance('while');
+ var t = nexttoken;
+ nonadjacent(token, t);
+ advance('(');
+ nospace();
+ parse(20);
+ if (nexttoken.id === '=') {
+ warning("Expected a conditional expression and instead saw an assignment.");
+ advance('=');
+ parse(20);
+ }
+ advance(')', t);
+ nospace(prevtoken, token);
+ funct['(breakage)'] -= 1;
+ funct['(loopage)'] -= 1;
+ }).labelled = true;
+ blockstmt('for',
+ function() {
+ var s, t = nexttoken;
+ funct['(breakage)'] += 1;
+ funct['(loopage)'] += 1;
+ advance('(');
+ nonadjacent(this, t);
+ nospace();
+ if (peek(nexttoken.id === 'var' ? 1 : 0).id === 'in') {
+ if (nexttoken.id === 'var') {
+ advance('var');
+ varstatement(true);
+ } else {
+ advance();
+ }
+ advance('in');
+ parse(20);
+ advance(')', t);
+ s = block(true);
+ if (!option.forin && (s.length > 1 || typeof s[0] !== 'object' || s[0].value !== 'if')) {
+ warning("The body of a for in should be wrapped in an if statement to filter unwanted properties from the prototype.", this);
+ }
+ funct['(breakage)'] -= 1;
+ funct['(loopage)'] -= 1;
+ return this;
+ } else {
+ if (nexttoken.id !== ';') {
+ if (nexttoken.id === 'var') {
+ advance('var');
+ varstatement();
+ } else {
+ for (;;) {
+ parse(0, 'for');
+ if (nexttoken.id !== ',') {
+ break;
+ }
+ advance(',');
+ }
+ }
+ }
+ advance(';');
+ if (nexttoken.id !== ';') {
+ parse(20);
+ if (nexttoken.id === '=') {
+ warning("Expected a conditional expression and instead saw an assignment.");
+ advance('=');
+ parse(20);
+ }
+ }
+ advance(';');
+ if (nexttoken.id === ';') {
+ error("Expected '{a}' and instead saw '{b}'.", nexttoken, ')', ';');
+ }
+ if (nexttoken.id !== ')') {
+ for (;;) {
+ parse(0, 'for');
+ if (nexttoken.id !== ',') {
+ break;
+ }
+ advance(',');
+ }
+ }
+ advance(')', t);
+ nospace(prevtoken, token);
+ block(true);
+ funct['(breakage)'] -= 1;
+ funct['(loopage)'] -= 1;
+ }
+ }).labelled = true;
+ stmt('break',
+ function() {
+ var v = nexttoken.value;
+ if (funct['(breakage)'] === 0) {
+ warning("Unexpected '{a}'.", nexttoken, this.value);
+ }
+ nolinebreak(this);
+ if (nexttoken.id !== ';') {
+ if (token.line === nexttoken.line) {
+ if (funct[v] !== 'label') {
+ warning("'{a}' is not a statement label.", nexttoken, v);
+ } else if (scope[v] !== funct) {
+ warning("'{a}' is out of scope.", nexttoken, v);
+ }
+ advance();
+ }
+ }
+ reachable('break');
+ });
+ stmt('continue',
+ function() {
+ var v = nexttoken.value;
+ if (funct['(breakage)'] === 0) {
+ warning("Unexpected '{a}'.", nexttoken, this.value);
+ }
+ nolinebreak(this);
+ if (nexttoken.id !== ';') {
+ if (token.line === nexttoken.line) {
+ if (funct[v] !== 'label') {
+ warning("'{a}' is not a statement label.", nexttoken, v);
+ } else if (scope[v] !== funct) {
+ warning("'{a}' is out of scope.", nexttoken, v);
+ }
+ advance();
+ }
+ }
+ reachable('continue');
+ });
+ stmt('return',
+ function() {
+ nolinebreak(this);
+ if (nexttoken.id === '(regexp)') {
+ warning("Wrap the /regexp/ literal in parens to disambiguate the slash operator.");
+ }
+ if (nexttoken.id !== ';' && !nexttoken.reach) {
+ nonadjacent(token, nexttoken);
+ parse(20);
+ }
+ reachable('return');
+ });
+ stmt('throw',
+ function() {
+ nolinebreak(this);
+ nonadjacent(token, nexttoken);
+ parse(20);
+ reachable('throw');
+ });
+ reserve('void');
+ reserve('class');
+ reserve('const');
+ reserve('enum');
+ reserve('export');
+ reserve('extends');
+ reserve('float');
+ reserve('goto');
+ reserve('import');
+ reserve('let');
+ reserve('super');
+ function jsonValue() {
+ function jsonObject() {
+ var t = nexttoken;
+ advance('{');
+ if (nexttoken.id !== '}') {
+ for (;;) {
+ if (nexttoken.id === '(end)') {
+ error("Missing '}' to match '{' from line {a}.", nexttoken, t.line + 1);
+ } else if (nexttoken.id === '}') {
+ warning("Unexpected comma.", token);
+ break;
+ } else if (nexttoken.id === ',') {
+ error("Unexpected comma.", nexttoken);
+ } else if (nexttoken.id !== '(string)') {
+ warning("Expected a string and instead saw {a}.", nexttoken, nexttoken.value);
+ }
+ advance();
+ advance(':');
+ jsonValue();
+ if (nexttoken.id !== ',') {
+ break;
+ }
+ advance(',');
+ }
+ }
+ advance('}');
+ }
+ function jsonArray() {
+ var t = nexttoken;
+ advance('[');
+ if (nexttoken.id !== ']') {
+ for (;;) {
+ if (nexttoken.id === '(end)') {
+ error("Missing ']' to match '[' from line {a}.", nexttoken, t.line + 1);
+ } else if (nexttoken.id === ']') {
+ warning("Unexpected comma.", token);
+ break;
+ } else if (nexttoken.id === ',') {
+ error("Unexpected comma.", nexttoken);
+ }
+ jsonValue();
+ if (nexttoken.id !== ',') {
+ break;
+ }
+ advance(',');
+ }
+ }
+ advance(']');
+ }
+ switch (nexttoken.id) {
+ case '{':
+ jsonObject();
+ break;
+ case '[':
+ jsonArray();
+ break;
+ case 'true':
+ case 'false':
+ case 'null':
+ case '(number)':
+ case '(string)':
+ advance();
+ break;
+ case '-':
+ advance('-');
+ if (token.character !== nexttoken.from) {
+ warning("Unexpected space after '-'.", token);
+ }
+ adjacent(token, nexttoken);
+ advance('(number)');
+ break;
+ default:
+ error("Expected a JSON value.", nexttoken);
+ }
+ }
+ var itself = function(s, o) {
+ var a, i;
+ JSLINT.errors = [];
+ predefined = Object.create(standard);
+ if (o) {
+ a = o.predef;
+ if (a instanceof Array) {
+ for (i = 0; i < a.length; i += 1) {
+ predefined[a[i]] = true;
+ }
+ }
+ if (o.adsafe) {
+ o.safe = true;
+ }
+ if (o.safe) {
+ o.browser = false;
+ o.css = false;
+ o.debug = false;
+ o.eqeqeq = true;
+ o.evil = false;
+ o.forin = false;
+ o.nomen = true;
+ o.on = false;
+ o.rhino = false;
+ o.safe = true;
+ o.sidebar = false;
+ o.strict = true;
+ o.sub = false;
+ o.undef = true;
+ o.widget = false;
+ predefined.Date = false;
+ predefined['eval'] = false;
+ predefined.Function = false;
+ predefined.Object = false;
+ predefined.ADSAFE = true;
+ }
+ option = o;
+ } else {
+ option = {};
+ }
+ option.indent = option.indent || 4;
+ adsafe_id = '';
+ adsafe_may = false;
+ adsafe_went = false;
+ approved = {};
+ if (option.approved) {
+ for (i = 0; i < option.approved.length; i += 1) {
+ approved[option.approved[i]] = option.approved[i];
+ }
+ }
+ approved.test = 'test';
+ tab = '';
+ for (i = 0; i < option.indent; i += 1) {
+ tab += ' ';
+ }
+ indent = 0;
+ global = Object.create(predefined);
+ scope = global;
+ funct = {
+ '(global)': true,
+ '(name)': '(global)',
+ '(scope)': scope,
+ '(breakage)': 0,
+ '(loopage)': 0
+ };
+ functions = [];
+ ids = {};
+ urls = [];
+ src = false;
+ xmode = false;
+ stack = null;
+ member = {};
+ membersOnly = null;
+ implied = {};
+ inblock = false;
+ lookahead = [];
+ jsonmode = false;
+ warnings = 0;
+ lex.init(s);
+ prereg = true;
+ prevtoken = token = nexttoken = syntax['(begin)'];
+ assume();
+ try {
+ advance();
+ if (nexttoken.value.charAt(0) === '<') {
+ html();
+ if (option.adsafe && !adsafe_went) {
+ warning("ADsafe violation: Missing ADSAFE.go.", this);
+ }
+ } else {
+ switch (nexttoken.id) {
+ case '{':
+ case '[':
+ option.laxbreak = true;
+ jsonmode = true;
+ jsonValue();
+ break;
+ case '@':
+ case '*':
+ case '#':
+ case '.':
+ case ':':
+ xmode = 'style';
+ advance();
+ if (token.id !== '@' || !nexttoken.identifier || nexttoken.value !== 'charset') {
+ error('A css file should begin with @charset "UTF-8";');
+ }
+ advance();
+ if (nexttoken.type !== '(string)' && nexttoken.value !== 'UTF-8') {
+ error('A css file should begin with @charset "UTF-8";');
+ }
+ advance();
+ advance(';');
+ styles();
+ break;
+ default:
+ if (option.adsafe && option.fragment) {
+ warning("ADsafe violation.", this);
+ }
+ statements('lib');
+ }
+ }
+ advance('(end)');
+ } catch(e) {
+ if (e) {
+ JSLINT.errors.push({
+ reason: e.message,
+ line: e.line || nexttoken.line,
+ character: e.character || nexttoken.from
+ },
+ null);
+ }
+ }
+ return JSLINT.errors.length === 0;
+ };
+ function to_array(o) {
+ var a = [],
+ k;
+ for (k in o) {
+ if (o.hasOwnProperty(k)) {
+ a.push(k);
+ }
+ }
+ return a;
+ }
+ itself.report = function(option, sep) {
+ var a = [],
+ c,
+ e,
+ f,
+ i,
+ k,
+ l,
+ m = '',
+ n,
+ o = [],
+ s,
+ v,
+ cl,
+ va,
+ un,
+ ou,
+ gl,
+ la;
+ function detail(h, s, sep) {
+ if (s.length) {
+ o.push('<div><i>' + h + '</i> ' + s.sort().join(sep || ', ') + '</div>');
+ }
+ }
+ s = to_array(implied);
+ k = JSLINT.errors.length;
+ if (k || s.length > 0) {
+ o.push('<div id=errors><i>Error:</i>');
+ if (s.length > 0) {
+ s.sort();
+ for (i = 0; i < s.length; i += 1) {
+ s[i] = '<code>' + s[i] + '</code>&nbsp;<i>' + implied[s[i]].join(' ') + '</i>';
+ }
+ o.push('<p><i>Implied global:</i> ' + s.join(', ') + '</p>');
+ c = true;
+ }
+ for (i = 0; i < k; i += 1) {
+ c = JSLINT.errors[i];
+ if (c) {
+ e = c.evidence || '';
+ o.push('<p>Problem' + (isFinite(c.line) ? ' at line ' + (c.line + 1) + ' character ' + (c.character + 1) : '') + ': ' + c.reason.entityify() + '</p><p class=evidence>' + (e && (e.length > 80 ? e.slice(0, 77) + '...': e).entityify()) + '</p>');
+ }
+ }
+ o.push('</div>');
+ if (!c) {
+ return o.join('');
+ }
+ }
+ if (!option) {
+ o.push('<br><div id=functions>');
+ if (urls.length > 0) {
+ detail("URLs<br>", urls, '<br>');
+ }
+ s = to_array(scope);
+ if (s.length === 0) {
+ if (jsonmode) {
+ if (k === 0) {
+ o.push('<p>JSON: good.</p>');
+ } else {
+ o.push('<p>JSON: bad.</p>');
+ }
+ } else {
+ o.push('<div><i>No new global variables introduced.</i></div>');
+ }
+ } else {
+ o.push('<div><i>Global</i> ' + s.sort().join(', ') + '</div>');
+ }
+ for (i = 0; i < functions.length; i += 1) {
+ f = functions[i];
+ cl = [];
+ va = [];
+ un = [];
+ ou = [];
+ gl = [];
+ la = [];
+ for (k in f) {
+ if (f.hasOwnProperty(k) && k.charAt(0) !== '(') {
+ v = f[k];
+ switch (v) {
+ case 'closure':
+ cl.push(k);
+ break;
+ case 'var':
+ va.push(k);
+ break;
+ case 'unused':
+ un.push(k);
+ break;
+ case 'label':
+ la.push(k);
+ break;
+ case 'outer':
+ ou.push(k);
+ break;
+ case true:
+ gl.push(k);
+ break;
+ }
+ }
+ }
+ o.push('<br><div class=function><i>' + f['(line)'] + '</i> ' + (f['(name)'] || '') + '(' + (f['(params)'] || '') + ')</div>');
+ detail('Closure', cl);
+ detail('Variable', va);
+ detail('<big><b>Unused</b></big>', un);
+ detail('Label', la);
+ detail('Outer', ou);
+ detail('Global', gl);
+ }
+ a = [];
+ for (k in member) {
+ if (typeof member[k] === 'number') {
+ a.push(k);
+ }
+ }
+ if (a.length) {
+ a = a.sort();
+ m = '<br><pre>/*members ';
+ l = 10;
+ for (i = 0; i < a.length; i += 1) {
+ k = a[i];
+ n = k.name();
+ if (l + n.length > 72) {
+ o.push(m + '<br>');
+ m = ' ';
+ l = 1;
+ }
+ l += n.length + 2;
+ if (member[k] === 1) {
+ n = '<i>' + n + '</i>';
+ }
+ if (i < a.length - 1) {
+ n += ', ';
+ }
+ m += n;
+ }
+ o.push(m + '<br>*/</pre>');
+ }
+ o.push('</div>');
+ }
+ return o.join('');
+ };
+ return itself;
+} (); (function(a) {
+
+ var input="";
+ var line="";
+ var blankcount="0";
+ while (blankcount < 10){
+ line=readline();
+
+ if (line=="")
+ blankcount++;
+ else
+ blankcount=0;
+ if (line=="END") break;
+ input += line;
+ input += "\n";
+ }
+ input = input.substring(0, input.length-blankcount);
+
+ if (!input) {
+ print("No input!");
+ quit(1);
+ }
+ if (!JSLINT(input, {
+ rhino: true,
+ passfail: false
+ })) {
+ for (var i = 0; i < JSLINT.errors.length; i += 1) {
+ var e = JSLINT.errors[i];
+ if (e) {
+ print('Lint at line ' + (e.line + 1) + ' character ' + (e.character + 1) + ': ' + e.reason);
+ print((e.evidence || '').replace(/^\s*(\S*(\s+\S+)*)\s*$/, "$1"));
+ print('');
+ }
+ }
+ } else {
+ print("jslint: No problems found.");
+ quit();
+ }
+})(arguments);
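Usage note: the wrapper above drives JSLINT from a js shell, reading source on standard input until a line reading END (or ten consecutive blank lines) and printing any problems. A minimal invocation sketch, assuming the file is installed executable as ~/bin/jslint with a Rhino/SpiderMonkey shebang; mylib.js is a placeholder:

    { cat mylib.js; print END; } | ~/bin/jslint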
diff --git a/loadutil b/loadutil
new file mode 100755
index 0000000..0e6a23d
--- a/dev/null
+++ b/loadutil
@@ -0,0 +1,48 @@
+#!/bin/zsh
+
+type=${1:="sess"}
+
+
+if [[ $type == sess ]];then
+ export yprofile="dactyl-sess"
+ pushd ~/.dactyls/sessions
+ file=$(print -l *(om[1,-1]) | ${=DMENU})
+ [[ -z $file ]] && exit
+ if [[ -n $file && $file == *: ]];then
+ while :;do
+ url=$($=DMENU < $file)
+ [[ -z $url ]] && break
+ firefox -new-tab $url[(w)1]
+ done
+ else
+ for line in "${(f)$(<$file)}";do
+ firefox -new-tab $line[(w)1]
+ done
+ fi
+ popd
+elif [[ $type == ist ]];then
+ export yprofile="dactyl-hist"
+ pushd ~/.dactyls/history/
+ #url=$(tac history | grep -v "about:blank" | uniq | ${(e)=YDMENU})
+ url=$(tac history | grep -v "about:" | uniq | $=DMENU)
+ [[ -n $url ]] && firefox -new-tab $url[(w)1]
+ popd
+elif [[ $type == speed ]];then
+ export yprofile="speed"
+ while :;do
+
+ url=$(awk '{ print NR" "$0 }' ~/.dactyls/speedy/speed | ${(e)=YDMENU} | awk '{ print $2 }')
+ [[ -z $url ]] && exit
+ firefox -new-tab "$url"
+
+ done
+elif [[ $type == lark ]];then
+ export yprofile="dactyl-lark"
+ while :;do
+
+ url=$(awk '{ print NR" "$0 }' ~/.dactyls/bmark/larks | ${(e)=YDMENU} | awk '{ print $2 }')
+ [[ -z $url ]] && exit
+ firefox -new-tab "$url"
+
+ done
+fi
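A usage sketch for loadutil; it assumes DMENU (and YDMENU for the speed/lark modes) are exported dmenu command lines and that the ~/.dactyls tree exists, both of which come from elsewhere in this setup:

    loadutil sess    # pick a saved dactyl session and reopen its URLs in firefox
    loadutil ist     # pick a URL from the dactyl history file
    loadutil speed   # loop over the speed-dial list until dmenu is cancelled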
diff --git a/mad b/mad
new file mode 100755
index 0000000..3bb640e
--- a/dev/null
+++ b/mad
@@ -0,0 +1,71 @@
+#!/bin/zsh
+CMD="$1"
+ARG="$2"
+typeset -A displacement dispatcher
+displacement=(prev '-10' next '+10')
+
+dispatcher=(
+ slide_mplayer ' mplayeraux s${cmd} '
+ slide_mpd ' mpc seek ${displacement[$cmd]}% '
+ toggle_mplayer ' mplayeraux pause '
+ toggle_mpd ' mpc toggle '
+ toggle_none ' none '
+ next_mplayer ' mplayeraux p$cmd '
+ next_mpd ' mpc $cmd '
+)
+
+
+function running(){
+ ecode=0
+ if mpc status | /bin/grep -q "\[playing\]";then
+ retval="mpd"
+ return
+ fi
+ mplayeraux status
+ ecode=$?
+ if [[ $ecode == 0 ]];then
+ retval="mplayer"
+ else
+ if pidof -s mplayer;then
+ latest mpd mplayer
+ fret=$retval
+ else
+ fret=mpd
+ fi
+ [[ $ecode == 1 ]] && fret=mpd
+ retval="$fret"
+ fi
+}
+
+function latest(){
+ typeset -a args
+ args=($*)
+ counter=1
+ base=0
+ output=""
+ for procs in ${(@)args};do
+ for pids in $(pidof $procs);do
+ #print $pids
+ temp=$(stat --printf=%Y /proc/$pids/statm)
+ if [[ $temp -ge $base ]];then
+ base=$temp;
+ #retval=${(P)$counter}
+ output=$counter
+ fi
+ done
+ counter=$(( counter+1 ))
+ done
+ retval=$args[$output]
+}
+
+
+function {toggle,slide,next} {
+ which="$0"
+ cmd=${1:-next}
+ running
+ function="${which}_${retval}"
+ ${(e)=dispatcher[$function]}
+}
+
+
+$CMD $ARG
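mad forwards a command to whichever player is currently active (mpd via mpc, or mplayer via mplayeraux); the first argument selects the dispatcher entry, the second the sub-command. Illustrative calls:

    mad toggle        # mpc toggle, or mplayeraux pause
    mad next prev     # mpc prev, or mplayeraux pprev
    mad slide next    # mpc seek +10%, or mplayeraux snext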
diff --git a/mailboxes b/mailboxes
new file mode 100755
index 0000000..51f8d72
--- a/dev/null
+++ b/mailboxes
@@ -0,0 +1,43 @@
+#!/bin/zsh
+#set -x
+which pushd &>/dev/null || exit 1
+
+pushd -q ~/Mail/Kronos/
+[[ $PWD == */Mail/Kronos ]] && find . -type d -mtime +30 -exec rm -rf {} &>/dev/null \;
+popd -q
+
+#today=$(date +%d-%m-%Y)
+#yesterday=$(date --date='1 day ago' +%d-%m-%Y)
+
+today=$(date +%Y-%m-%d)
+yesterday=$(date --date='1 day ago' +%Y-%m-%d)
+
+if [[ ! ${$(readlink ~/Mail/Kronos/Today):t} == $today \
+ || ! ${$(readlink ~/Mail/Kronos/Yesterday/):t} == $yesterday ]];then
+ rm ~/Mail/Kronos/Yesterday &>/dev/null
+ ln -sf ~/Mail/Kronos/$yesterday ~/Mail/Kronos/Yesterday
+ rm ~/Mail/Kronos/Today &>/dev/null
+ ln -sf ~/Mail/Kronos/$today ~/Mail/Kronos/Today
+fi
+
+
+echo "~/Mail/Kronos/Today \
+ ~/Mail/Kronos/Yesterday \
+ ~/Mail/INBOX/ \
+ ~/Mail/Me/ \
+ ~/Mail/Dyne/ \
+ ~/Mail/Wnohang/ \
+ ~/Mail/Search/ \
+ ~/Mail/Kronos/ \
+ ~/Mail/Yahoo/ \
+ ~/Mail/Sent/ \
+ $(find ~/Mail/ -type d -name new -printf '%d:%T@:%h \n' \
+ | sort -t : -k 1,2 -r \
+ | cut -d : -f 3 \
+ | /bin/grep -v -E '(Chro|Old|Junk|Kro|INBOX|Search|Dyne|Wnohang|Yahoo|Sent|Me|Archive)' \
+ | tr '\n' ' ' \
+ | tr -s ' ') \
+ ~/Mail/Archive/eBackup/"
+
+
+#set +x
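The script prints a space-separated mailbox list (the fixed folders first, then maildirs sorted by recent activity); the usual consumer would be a mutt `mailboxes` line populated via backticks, which is an assumption about the intended use. To eyeball the output from a shell:

    ~/bin/mailboxes | tr -s ' ' '\n' | head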
diff --git a/mailhops b/mailhops
new file mode 100755
index 0000000..d42caa0
--- a/dev/null
+++ b/mailhops
@@ -0,0 +1,96 @@
+#!/usr/bin/perl -w
+# Copyright (c) 1999 Marius Gedminas
+# Shows the route of an Internet mail message
+# Version 0.0.1pre-alpha
+#
+# Patched by Roland Rosenfeld
+# $ Id: mailhops,v 1.3 2000/01/25 20:18:24 roland Exp roland $
+
+use strict;
+use POSIX qw(mktime);
+use Date::Parse;
+
+my $verbose = 0;
+
+# Setup
+my %Months = (
+ Jan => 1,
+ Feb => 2,
+ Mar => 3,
+ Apr => 4,
+ May => 5,
+ Jun => 6,
+ Jul => 7,
+ Aug => 8,
+ Sep => 9,
+ Oct => 10,
+ Nov => 11,
+ Dec => 12,
+ );
+
+# Read headers
+$/ = '';
+my $head = <>;
+$head =~ s/\n\s+/ /g;
+my @headers = split("\n", $head);
+
+# Parse headers
+my @hops;
+for (@headers) {
+ next unless /^(>?Received|Date):/;
+ my $time;
+ my $host;
+ my $from;
+ if (/^Date:\s+(.*)/) {
+ $host = "Date:";
+ $time = $1;
+ $from = "";
+ } else {
+ $host = "(unknown)";
+ $host = $1 if /\sby\s+([a-z0-9\-_+.]+)\s/ && $1 ne "uid";
+ $from = "(unknown)";
+ $from = $1 if /\sfrom\s+([a-z0-9\-_+.]+(?:\s+[(].+?[)]))\s/;
+ $time = "(unknown)";
+ $time = $1 if /;\s+(.+)$/;
+ $time =~ s/using.*//;
+ }
+
+ my $epoch = str2time ($time);
+
+ unshift @hops, { HOST => $host, FROM => $from, TIME => $epoch};
+}
+
+# Print output
+print " Host Date received (local) Lag Total lag\n";
+my $nr = 0;
+my ($first, $prev);
+for (@hops) {
+ my $host = $_->{HOST};
+ my $from = $_->{FROM};
+ my $time = $_->{TIME};
+ $first = $prev = $time unless defined $first;
+ printf "%2d. %-31.31s", ++$nr, $host;
+ do { print "\n"; next } unless defined $time;
+
+ my $delta = $time - $prev;
+ my $neg = $delta < 0; $delta = abs($delta);
+ my $delta_h = int($delta / 3600);
+ my $delta_m = int(($delta - $delta_h * 3600) / 60);
+ my $delta_s = ($delta - $delta_h * 3600 - $delta_m * 60);
+
+ my ($sec,$min,$hour,$day,$mon,$year,undef,undef,$dst) = localtime($time);
+
+ printf " %4d-%02d-%02d %02d:%02d:%02d %s%02d:%02d:%02d",
+ 1900+$year, $mon+1, $day, $hour, $min, $sec,
+ $neg ? '-' : ' ', $delta_h, $delta_m, $delta_s;
+
+ $delta = $time - $first;
+ $neg = $delta < 0; $delta = abs($delta);
+ $delta_h = int($delta / 3600);
+ $delta_m = int(($delta - $delta_h * 3600) / 60);
+ $delta_s = ($delta - $delta_h * 3600 - $delta_m * 60);
+ printf " %s%02d:%02d:%02d\n",
+ $neg ? '-' : ' ', $delta_h, $delta_m, $delta_s;
+ print " from $from\n" if $verbose;
+ $prev = $time;
+} \ No newline at end of file
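mailhops reads one raw message (headers first) on standard input and prints a line per Received hop with the per-hop and cumulative lag; it needs Date::Parse (perl-timedate) installed, per the use line above. A sketch with a placeholder message path:

    mailhops < ~/Mail/INBOX/cur/1299965687.example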
diff --git a/mailto b/mailto
new file mode 100755
index 0000000..029df0f
--- a/dev/null
+++ b/mailto
@@ -0,0 +1,27 @@
+#!/bin/zsh
+# Depends on url-utils package and zenity
+mailto="${1%\?*}"
+subject="$(urldecode <<<${${${1#*\?}%&*}#*=})"
+body="$(urldecode <<<${${${1#*\?}#*&}#*=})"
+
+zenity --question --height 400 --width 680 --text "Send mail with subject $subject ?"
+
+[[ $? -eq 1 ]] && exit
+
+echo -e "\nDate: $(date -R)
+Mailto: $mailto
+Subject: $subject
+Body: $body\n" >>| ~/logs/mailto.log
+
+
+if [[ ! -z "$body" ]];then
+ /usr/bin/mutt -s "$subject" "$mailto" <<<"$body"
+else
+ urxvt -e /usr/bin/mutt -s "$subject" "$mailto"
+fi
+
+if [[ $? -eq 1 ]];then
+ ~/bin/notify-send "Something went wrong..oops"
+else
+ ~/bin/notify-send "Mail with $subject successfully sent"
+fi
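mailto expects a single mailto: URL (as handed over by a browser) and depends on urldecode from url-utils plus zenity, per the comment above. A hand-run sketch with a placeholder address and fields:

    mailto 'mailto:user@example.com?subject=Hello&body=Just%20testing'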
diff --git a/mark-yank-urls b/mark-yank-urls
new file mode 100755
index 0000000..d6cc743
--- a/dev/null
+++ b/mark-yank-urls
@@ -0,0 +1,310 @@
+#!/usr/bin/perl
+# Author: Bart Trojanowski <[email protected]>
+# Website: http://www.jukie.net/~bart/blog/urxvt-url-yank
+# License: GPLv2
+# Modified by: Raghavendra Prabhu raghu d0t prabhu 13 AT google mail
+
+use strict;
+use warnings;
+
+my $url_matcher = qr{(
+ (?:https?://|ftp://|news://|mailto:|file://|www\.)[ab-zA-Z0-9\-\@;\/?:&=%\$_.+!*\x27(),~#]+
+ [ab-zA-Z0-9\-\@;\/?&=%\$_+!*\x27()~] # exclude some trailing characters (heuristic)
+)}x;
+
+
+sub on_start {
+ my ($term) = @_;
+
+ $term->{have_Clipboard} = eval { require Clipboard; };
+ if ($term->{have_Clipboard}) {
+ import Clipboard;
+ }
+
+ # Prefer Regexp::Common's HTTP URI matcher when it is available
+ if (eval { require Regexp::Common::URI }) {
+ require Regexp::Common;
+ Regexp::Common->import('URI');
+
+ $url_matcher = $Regexp::Common::RE{URI}{HTTP};
+ }
+
+ $term->{browser} = $term->x_resource ("urlLauncher") || "x-www-browser";
+ $term->{browser2} = $term->x_resource ("launcher2") || "x-www-browser";
+ ()
+}
+
+sub on_line_update {
+ my ($term, $row) = @_;
+
+ # Fetch the line that has changed.
+ my $line = $term->line($row);
+ my $text = $line->t;
+
+ # Find and underline URLs.
+ while ($text =~ /($url_matcher)/g) {
+ my $url = $1;
+ my $rend = $line->r;
+
+ # Also yank the url
+ Clipboard->copy($url);
+
+ # Mark all characters as underlined. we _must_ not toggle underline, as
+ # we might get called on an already-marked URL.
+ my $underlineURLs = $term->x_resource ('underlineURLs') || 'false';
+ if($underlineURLs eq 'true') {
+ my ($first, $last) = ($-[1], $+[1] - 1);
+
+ --$last if $url =~ s/["']$//;
+
+ $_ |= urxvt::RS_Uline for @{$rend}[$first .. $last];
+
+ $line->r($rend);
+ }
+ }
+
+ ()
+}
+
+sub on_button_release {
+ my ($term, $event) = @_;
+
+ my $mask = $term->ModLevel3Mask | $term->ModMetaMask
+ | urxvt::ShiftMask | urxvt::ControlMask;
+
+ if ($event->{button} == 2 && ($event->{state} & $mask) == 0) {
+ my $row = $event->{row};
+ my $col = $event->{col};
+
+ my $line = $term->line ($row);
+ my $text = $line->t;
+
+ while ($text =~ /($url_matcher)/g) {
+ my ($url, $first, $last) = ($1, $-[1], $+[1]);
+
+ if($first <= $col && $last >= $col) {
+ $url =~ s/["']$//;
+ $term->exec_async($term->{browser}, $url);
+ return 1;
+ }
+ }
+ }
+
+ ()
+}
+
+
+my $mark_mode_active = 0;
+my %mod = ( 'control' => 0, 'shift' => 0 );
+my $url_selected = -1;
+my @url_db = ();
+
+
+sub do_scan_for_urls {
+ my ($term) = @_;
+
+ @url_db = ();
+
+ my $row_start = $term->top_row;
+ my $row_end = $term->nrow;
+
+ for my $row ($row_start .. $row_end) {
+
+ # Fetch the line that has changed.
+ my $line = $term->line ($row);
+ my $text = $line->t;
+
+ # Find all urls (if any).
+ while ($text =~ /($url_matcher)/g) {
+ my $rend = $line->r;
+
+ my ($url, $first, $last) = ($1, $-[1], $+[1] - 1);
+
+ --$last if $url =~ s/["']$//;
+
+ my %h = (
+ row => $row,
+ col_from => $first,
+ col_to => $last,
+ url => $url,
+ );
+
+ push @url_db, \%h;
+ }
+ }
+
+ # 0 for none, positive count otherwise
+ return $#url_db + 1;
+}
+
+
+sub on_user_command {
+ my ($term, $cmd) = @_;
+
+ activate_mark_mode($term) if $cmd eq 'mark-yank-urls:activate_mark_mode';
+
+ ()
+}
+
+sub on_key_press {
+ my ($term, $event, $keysym, $octets) = @_;
+
+ if ($keysym == 65507) { # <control>
+ $mod{control} = 1;
+
+ } elsif ($keysym == 65505) { # <shift>
+ $mod{shift} = 1;
+
+ }
+
+ # Ignore all input when we are active.
+ $mark_mode_active && return 1;
+
+ ()
+}
+
+sub on_key_release {
+ my ($term, $event, $keysym, $octets) = @_;
+
+ if ($mark_mode_active) {
+ my $ch = chr($keysym);
+
+ if ($keysym == 65307) { # <esc>
+ deactivate_mark_mode ($term);
+ return 1;
+
+ } elsif ($keysym == 65293) { # <enter>
+ my $url = get_active_url($term);
+ $term->exec_async($term->{browser}, $url);
+ deactivate_mark_mode ($term);
+ return 1;
+
+ } elsif ($keysym == 65507) { # <control>
+ $mod{control} = 0;
+ return 1;
+
+ } elsif ($keysym == 93) { # ]
+ my $url = get_active_url($term);
+ $term->exec_async($term->{browser2}, $url);
+ deactivate_mark_mode ($term);
+ return 1;
+ } elsif ($keysym == 65505) { # <shift>
+ $mod{shift} = 0;
+ return 1;
+
+ } elsif ($mod{control} && (($ch eq 'n') || ($ch eq 'p'))) {
+ # ^n and ^p to cycle list
+ my $dir = ($ch eq 'n') ? 1 : -1;
+ move_highlight ($term, $dir);
+
+ } elsif ($ch eq 'y') { # y
+ do_copy ($term);
+ deactivate_mark_mode ($term);
+ return 1;
+
+ }
+
+ return 1;
+ }
+
+ ()
+}
+
+sub get_active_url {
+ my ($term) = @_;
+ my $max = $#url_db + 1;
+
+ return if $url_selected < 0 || $url_selected >= $max;
+ return if not defined $url_db[$url_selected];
+ my $o = $url_db[$url_selected];
+ my %h = %$o;
+
+ return $h{url};
+}
+
+sub do_copy {
+ my ($term) = @_;
+
+ my $text = get_active_url ($term);
+
+ if ($term->{have_Clipboard}) {
+ Clipboard->copy($text);
+ } else {
+ $text =~ s/(["|><&()])/\\$1/g; # escape shell metacharacters before echoing to xclip
+ system ("echo -n \"$text\" | xclip -i");
+ }
+}
+
+sub move_highlight {
+ my ($term, $dir) = @_;
+ my $max = $#url_db + 1;
+
+ do_highlight ($term, 0);
+
+ $url_selected = ($max + $url_selected + $dir) % $max;
+
+ do_highlight ($term, 1);
+
+ $term->want_refresh;
+}
+
+sub do_highlight {
+ my ($term, $enable) = @_;
+ my $max = $#url_db + 1;
+
+ return if $url_selected < 0 || $url_selected >= $max;
+ return if not defined $url_db[$url_selected];
+
+ my $o = $url_db[$url_selected];
+ my %h = %$o;
+
+ my $row = $h{row};
+ my $line = $term->line ($row);
+ my $text = $line->t;
+ my $rend = $line->r;
+
+ if ($enable) {
+ $_ |= urxvt::RS_RVid
+ for @{$rend}[ $h{col_from} .. $h{col_to}];
+
+ # make it visible
+ $term->view_start ( $row < 0 ? $row : 0 );
+
+ } else {
+ $_ &= ~urxvt::RS_RVid
+ for @{$rend}[ $h{col_from} .. $h{col_to}];
+ }
+
+ $line->r ($rend);
+}
+
+sub activate_mark_mode {
+ my ($term) = @_;
+
+ if ($mark_mode_active) {
+
+ move_highlight ($term, -1);
+
+ } elsif ( do_scan_for_urls ($term) ) {
+
+ $term->{save_view_start} = $term->view_start;
+
+ move_highlight ($term, 0);
+
+ $mark_mode_active=1 if ($url_selected > -1);
+ }
+}
+
+sub deactivate_mark_mode {
+ my ($term) = @_;
+
+ do_highlight ($term, 0);
+
+ $mark_mode_active = 0;
+ $url_selected = -1;
+
+ $term->view_start ($term->{save_view_start});
+ $term->want_refresh;
+}
+
+# vim: set et ts=4 sw=4:
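To use the extension, urxvt has to find it on its perl-lib path and a keysym has to trigger the user command checked in on_user_command. The resource names urlLauncher, launcher2 and underlineURLs come from the code above; the library path and the M-u binding below are assumptions:

    print -l 'URxvt.perl-lib: /home/raghavendra/bin' \
             'URxvt.perl-ext-common: mark-yank-urls' \
             'URxvt.keysym.M-u: perl:mark-yank-urls:activate_mark_mode' \
             'URxvt.urlLauncher: firefox' >> ~/.Xresources
    xrdb -merge ~/.Xresources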
diff --git a/mmove b/mmove
new file mode 100755
index 0000000..4d37d05
--- a/dev/null
+++ b/mmove
@@ -0,0 +1,15 @@
+#!/bin/zsh
+#set -x
+wid=$(xdotool search --pid $(pidof -s mplayer) 2>/dev/null)
+xord=600
+yord=0
+eyord=$(xwininfo -id $wid | /bin/grep -i "Absolute upper-left Y" | cut -d ":" -f 2| tr -d '[:space:]' )
+if [[ $eyord == "0" ]];then
+ yord=200
+elif [[ $eyord == "200" ]];then
+ yord=506
+elif [[ $eyord == "506" ]];then
+ yord=0
+fi
+xdotool windowmove $wid $xord $yord
+#set +x
diff --git a/mnger b/mnger
new file mode 100755
index 0000000..aa8a6b1
--- a/dev/null
+++ b/mnger
@@ -0,0 +1,40 @@
+#!/bin/zsh
+#set -x
+USER="raghavendra"
+duration=120
+export DISPLAY=:0
+state_file="/home/$USER/.dpms/.state"
+previous=$(< $state_file)
+
+while :
+do
+ # Heartbeat for flash
+ #state=$(top -b -n 1 | grep plugin-cont | head -1 | awk '{ print $8 }')
+ if xdotool search --name '.*flash.*';then
+ #if [[ -n $state && $state == 'R' ]];then
+ sudo -u $USER xdotool key ctrl
+ sleep 10
+ continue
+ fi
+ mplayeraux status
+ if [[ $? == 1 ]];then
+ sleep 60
+ else
+ inotifywait -t $duration /dev/input/event*
+ fi
+ if [[ $? == 2 && $previous == up ]];then
+ for file in /home/$USER/.dpms/*;do
+ $file down
+ done
+ echo -n "down" >| $state_file
+ elif [[ $previous == down ]];then
+ for file in /home/$USER/.dpms/*;do
+ $file up
+ done
+ echo -n "up" >| $state_file
+ else
+ sleep 60
+ #Something else or nothing
+ fi
+done
+#set +x
diff --git a/modprobe b/modprobe
new file mode 100755
index 0000000..60fee0a
--- a/dev/null
+++ b/modprobe
@@ -0,0 +1,41 @@
+#!/bin/zsh
+export HHOME="/home/raghavendra"
+modname=$1
+mprobe="/sbin/modprobe"
+
+typeset -A module_table
+module_table=(
+nvidia nvidia
+ip_set ipset
+xt_set ipset
+setx ipset
+vbox vboxdrv
+)
+
+prefix="$HHOME/Arch/Build/external"
+
+# (restored step) probe for the module first; build it out of tree only if modprobe fails
+$mprobe "$@" &>/dev/null
+
+if [[ $? != 0 ]];then
+ echo "Kernel module [email protected] is not there"
+ echo "Brewing $modname module......."
+ export PATH="/usr/bin/vendor_perl:/usr/bin:/bin:/usr/local/sbin:/usr/sbin:/sbin:/usr/lib/perl5/vendor_perl/bin:/usr/bin/core_perl:/opt/plan9/bin"
+ # Optional
+ #unset CFLAGS CXXFLAGS LDFLAGS
+ export EXTRA_LDFLAGS=" -s "
+ pushd $prefix || exit 1
+ for key in ${(k)module_table};do
+ if [[ $modname == *$key* ]];then
+ file=$module_table[$key]
+ if [[ -n $file ]];then
+ ./$file &>>|$HHOME/logs/cmod.log
+ /sbin/depmod
+ $mprobe $file
+ else
+ exit 1
+ fi
+ fi
+ done
+fi
diff --git a/moviemanager b/moviemanager
new file mode 100755
index 0000000..64f2459
--- a/dev/null
+++ b/moviemanager
@@ -0,0 +1,70 @@
+#!/bin/zsh
+
+# Dependencies
+# 1. fetch_poster.py
+# 2. imdbpy.py -- both from mythtv scripts(no need to install mythtv.. just
+# search for those files on intertubes)
+# 3. imdb package from http://imdbpy.sourceforge.net/
+
+# To add
+# 1. My votes and other votes
+# 2. Symlink to download dir
+# 3. Symlink to torrent file (legal of course)
+# 4. Search and retrieval
+# 5. Linking between movies
+# 6. Handling parallel connections -- without getting banned :)
+# 7. Watched or not
+# 8. Better genre sorting/searching
+
+#set -x
+if [[ -z $2 ]];then echo "Not enough arguments"; exit 1;fi
+
+
+action=${1:-view}
+
+
+dir="$HOME/.imdatabase"
+ids="$dir/.movieids"
+rm $dir/.done &>/dev/null
+
+
+if [[ $action == add ]];then
+ url="$2"
+ stage=$3
+ temp=$RANDOM
+ movie_id="${${url:t}#[a-zA-Z][a-zA-Z]}"
+
+ if ps auxww | grep -v grep | grep -q $movie_id;then
+ exit
+ fi
+
+ if grep -q $movie_id $ids;then
+ exit
+ else
+ echo $movie_id >> "$dir/.movieids"
+ fi
+
+ title="$(get_movie.py $movie_id 2>/dev/null | tee /tmp/$temp | grep Title | cut -d : -f 2 | sed -e 's/^\s*//g')"
+ [[ -d "$dir/$title" || -d "$dir/$title-stage" ]] && exit
+
+
+ if [[ -n $stage ]];then
+ title="$title-stage"
+ fi
+ mkdir -p "$dir/$title"
+ mv /tmp/$temp "$dir/$title/desc"
+
+ poster=$(~/bin/fetch_poster.py -P $movie_id -a 2>/dev/null | tail -1)
+ mv $poster "$dir/$title/poster.jpg"
+ notify-send "IMDB:" "^i(/home/raghavendra/.notify-icons/movies.xpm)$title added"
+elif [[ $action == view ]];then
+ notify-send "IMDB:" "^i(/home/raghavendra/.notify-icons/movies.xpm)Nothing for now"
+fi
+
+# Completion token
+touch $dir/.done &>/dev/null
+#set +x
+
+#for x in *;do x="$(echo $x | tr -dc '[0-9A-Za-z ]')"; echo -n "$x::"; locate "$x[(w)1]" | /bin/grep -i -E '*.(avi|mkv|mp4)$' | grep "$x[(w)2]" | grep "$x[(w)3]" | head -1 ; echo "===================="; done | tee /tmp/llog
+#for line in $(cat /tmp/llog2);do first="$(echo $line | cut -d ':' -f 1)"; second="$(echo $line | cut -d : -f 3)"; pushd "*$first*" 2>/dev/null; echo "ln -s $second:h ." ;popd;done |& less
+
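A usage sketch for moviemanager; the IMDb URL is a placeholder, and per the header it relies on fetch_poster.py and the IMDbPY package being installed:

    moviemanager add http://www.imdb.com/title/tt0137523   # fetch details and poster into ~/.imdatabase
    moviemanager view anything                             # second argument is required; 'view' is still a stub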
diff --git a/mpdStats b/mpdStats
new file mode 100755
index 0000000..3e3d063
--- a/dev/null
+++ b/mpdStats
@@ -0,0 +1,4 @@
+#!/bin/zsh
+#mpc status | tr '\n' '@' | perl -lne 'print substr($1,0,45).qq/ $2/ if m{^(.+)@\Q[\E.+(\d+:\d+/\d+:\d+)}'
+
+mpc status | tr '\n' '@' | perl -ne 'print "♬ ".substr($1,0,45).qq/ $2/." ♬" if m{^(.+)@\Q[\E.+(\d+:\d+/\d+:\d+)}'
diff --git a/mpdspl.py b/mpdspl.py
new file mode 100755
index 0000000..b723992
--- a/dev/null
+++ b/mpdspl.py
@@ -0,0 +1,622 @@
+#! /usr/bin/env python
+#
+# A script to create smart playlists, with a variety of criteria, out of an
+# MPD database.
+#
+# Authors:
+# Sebastien Delafond <[email protected]>
+# original implementation by Michael Walker <[email protected]>
+#
+# This code is licensed under the GPL v3, or any later version at your choice.
+
+import codecs, cPickle, datetime, operator, optparse
+import os, os.path, sqlite3, sys, re, textwrap, time
+
+DEFAULT_ENCODING = 'utf-8'
+
+DEFAULT_MPD_CONFIG_FILE = "/etc/mpd.conf"
+
+# There is an environmental variable XDG_CACHE_HOME which specifies where to
+# save cache files. However, if not set, a default of ~/.cache should be used.
+DEFAULT_CACHE_FILE = os.environ.get('XDG_CACHE_HOME',
+ os.path.join(os.environ['HOME'], ".cache"))
+DEFAULT_CACHE_FILE = os.path.expanduser(os.path.join(DEFAULT_CACHE_FILE,
+ "mpdspl/mpddb.cache"))
+
+# $XDG_DATA_HOME specifies where to save data files, in our case a record of
+# playlists which have been created. If unset a default of ~/.local/share
+# should be used.
+DEFAULT_DATA_DIR = os.environ.get('XDG_DATA_HOME',
+ os.path.join(os.environ['HOME'], ".local/share/"))
+DEFAULT_DATA_DIR = os.path.expanduser(os.path.join(DEFAULT_DATA_DIR,
+ "mpdspl"))
+
+KEYWORDS = {"ar" : ("Artist", "Artist"),
+ "al" : ("Album", "Album"),
+ "ti" : ("Title", "Title"),
+ "tn" : ("Track", "Track number"),
+ "ge" : ("Genre", "Genre"),
+ "ye" : ("Date", "Track year"),
+ "le" : ("Time", "Track duration (in seconds)"),
+ "fp" : ("file", "File full path"),
+ "fn" : ("key", "File name"),
+ "mt" : ("mtime", "File modification time"),
+ "ra" : ("Rating", "Track rating"),
+ "raar" : ("RatingAr", "Artist rating"),
+ "raal" : ("RatingAl", "Album rating"),
+ "rag" : ("RatingGe", "Genre rating"),
+ "pc" : ("PlayCount", "Play Count") }
+
+class CustomException(Exception):
+ pass
+
+class AbstractRule:
+ def __init__(self, key, operator, delimiter, value, flags):
+ if key.lower() in KEYWORDS:
+ self.key = KEYWORDS[key.lower()][0]
+ elif key.lower() in [ v[0].lower() for v in KEYWORDS.values() ]:
+ self.key = key.lower()
+ else:
+ raise CustomException("A track has no attribute '%s'" % (key,))
+
+ self.operator = operator
+ self.delimiter = delimiter
+ self.value = value
+ if flags:
+ self.flags = tuple(flags)
+ else:
+ self.flags = ()
+ self.negate = 'n' in self.flags
+
+ def __repr__(self):
+ return "%(key)s%(operator)s%(delimiter)s%(value)s%(delimiter)s flags=%(flags)s" % self.__dict__
+
+ def getOperator(self):
+ return self.OPERATORS[self.operator]
+
+ def match(self, track):
+ attr = getattr(track, self.key.lower())
+ matched = self.__match__(attr)
+
+ if self.negate:
+ matched = not matched
+ return matched
+
+class RegexRule(AbstractRule):
+ """ Search according to a regex, for instance:
+ contains foo --> =/foo/
+ contains bar, case-insensitive --> =/bar/i
+ does not contain baz --> !/foo/ """
+
+ OPERATORS = { '=' : re.search,
+ '!' : lambda *v: not re.search(*v) }
+ FLAGS = { 'i' : re.IGNORECASE,
+ 'l' : re.LOCALE }
+
+ def __init__(self, key, operator, delimiter, value, flags):
+ AbstractRule.__init__(self, key, operator,
+ delimiter, value, flags)
+ self.reFlags = 0
+ for reFlag in self.flags:
+ self.reFlags |= self.FLAGS[reFlag]
+
+ def __match__(self, value):
+ try:
+ value = str(value)
+ except:
+ value = value.encode(DEFAULT_ENCODING)
+ return self.getOperator()(self.value, value, self.reFlags)
+
+class NumberRule(AbstractRule):
+ """ Match according to a number comparison, for instance:
+ greater or equal than 30 --> >=#30#
+ lesser than 80 --> <#80# """
+
+ OPERATORS = { '=' : operator.eq,
+ '<' : operator.lt,
+ '>' : operator.gt,
+ '>=' : operator.ge,
+ '<=' : operator.le }
+
+ def __init__(self, key, operator, delimiter, value, flags):
+ AbstractRule.__init__(self, key, operator,
+ delimiter, value, flags)
+ self.number = float(value)
+
+ def __match__(self, value):
+ if not value:
+ value = 0
+ return self.getOperator()(float(value), self.number)
+
+class TimeDeltaRule(AbstractRule):
+ """ Match according to a timedelta, for instance:
+ in the last 3 days --> <=%3days%
+ before last month --> >%1month%
+ 3 years ago --> =%3years% """
+
+ OPERATORS = { '=' : operator.eq,
+ '<' : operator.lt,
+ '>' : operator.gt,
+ '>=' : operator.ge,
+ '<=' : operator.le }
+
+ TIME_DELTA_REGEX = r'(?P<number>\d+)\s*(?P<unit>[a-zA-Z]+)'
+
+ def __init__(self, key, operator, delimiter, value, flags):
+ AbstractRule.__init__(self, key, operator,
+ delimiter, value, flags)
+
+ m = re.match(self.TIME_DELTA_REGEX, self.value)
+ if not m:
+ raise CustomException("Could not parse duration")
+ d = m.groupdict()
+ self.number = int(d['number'])
+ self.unit = d['unit'].lower()
+ if not self.unit.endswith('s'):
+ self.unit += 's'
+
+ self.value = datetime.timedelta(**{self.unit : self.number})
+ self.now = datetime.datetime.now()
+
+ def __match__(self, value):
+ delta = self.now - datetime.datetime.fromtimestamp(int(value))
+ return self.getOperator()(delta, self.value)
+
+class TimeStampRule(AbstractRule):
+ """ Match according to a timestamp, for instance:
+ before 2010-01-02 --> <@2010-01-02@
+ after 2009-12-20 (included) --> >=@2009-12-20@
+ on 2009-11-18 --> =@2009-11-18@ """
+
+ OPERATORS = { '=' : operator.eq,
+ '<' : operator.lt,
+ '>' : operator.gt,
+ '>=' : operator.ge,
+ '<=' : operator.le }
+
+ TIME_STAMP_FORMAT = '%Y-%m-%d'
+
+ def __init__(self, key, operator, delimiter, value, flags):
+ AbstractRule.__init__(self, key, operator,
+ delimiter, value, flags)
+
+ ts = time.strptime(self.value, self.TIME_STAMP_FORMAT)
+ self.value = time.mktime(ts)
+
+ def __match__(self, value):
+ # round down to the precision of TIME_STAMP_FORMAT before comparing
+ value = time.gmtime(float(value)) # in seconds since epoch
+ value = time.strftime(self.TIME_STAMP_FORMAT, value)
+ value = time.mktime(time.strptime(value, self.TIME_STAMP_FORMAT))
+ return self.getOperator()(value, self.value)
+
+class RuleFactory:
+ DELIMITER_TO_RULE = { '/' : RegexRule,
+ '%' : TimeDeltaRule,
+ '@' : TimeStampRule,
+ '#' : NumberRule }
+
+ @staticmethod
+ def getRule(ruleString):
+ m = re.match(r'(?P<key>\w+)(?P<operator>.+?)(?P<delimiter>[' +
+ ''.join(RuleFactory.DELIMITER_TO_RULE.keys()) +
+ r'])(?P<value>.+)(?P=delimiter)(?P<flags>\w+)?',
+ ruleString)
+ if not m:
+ raise CustomException("Could not parse rule '%s'" % (ruleString,))
+
+ d = m.groupdict()
+ ruleClass = RuleFactory.DELIMITER_TO_RULE[d['delimiter']]
+ return ruleClass(**d)
+
+ @staticmethod
+ def help():
+ s = ""
+ for d, r in RuleFactory.DELIMITER_TO_RULE.iteritems():
+ s += " '%s' -> %s\n" % (d, r.__doc__)
+ return s
+
+class Playlist:
+ REGEX = re.compile(r'\s*,\s*') # how we split rules in a ruleset
+ PLAYLIST_DIR = None # where to save m3u files
+ CACHE_DIR = None # where to save marshalled playlists
+
+ def __init__(self, name, ruleString):
+ self.name = name
+ self.rules = [ RuleFactory.getRule(r)
+ for r in self.REGEX.split(ruleString) ]
+ self.tracks = [] # tracks matching the rules; empty for now
+
+ @staticmethod
+ def initStaticAttributes(playlistDir, cacheDir):
+ Playlist.PLAYLIST_DIR = playlistDir
+ Playlist.CACHE_DIR = cacheDir
+
+ @staticmethod
+ def load(name):
+ obj = loadgubbage(Playlist.getSaveFile(name))
+ try:
+ assert isinstance(obj, Playlist)
+ except:
+ raise CustomException("Restoring old playlists won't work, please rm '%s'." % (playlistfile,))
+
+ return obj
+
+ def save(self):
+ savegubbage(self, Playlist.getSaveFile(self.name))
+
+ @staticmethod
+ def getSaveFile(name):
+ return os.path.join(Playlist.CACHE_DIR, name)
+
+ def findMatchingTracks(self, mpdDB):
+ self.tracks = []
+
+ for track in mpdDB.getTracks():
+ toAdd = True
+ for rule in self.rules:
+ if not rule.match(track): # Add the track if appropriate
+ toAdd = False
+ break
+
+ if toAdd:
+ self.tracks.append(track)
+
+ self.tracks.sort()
+ self.setM3u()
+
+ def setM3u(self):
+ l = [ track.file for track in self.tracks ]
+ self.m3u = '\n'.join(l)
+
+ def getM3uPath(self):
+ return os.path.join(self.PLAYLIST_DIR, self.name + ".m3u")
+
+ def writeM3u(self):
+ filePath = self.getM3uPath()
+ print "Saving playlist '%s' to '%s'" % (playlist.name, filePath)
+ codecs.open(filePath, 'w', DEFAULT_ENCODING).write(self.m3u + '\n')
+
+class PlaylistSet:
+ def __init__(self, playlists):
+ self.playlists = playlists
+
+ def addMarshalled(self, name):
+ if name in self.playlists.keys():
+ raise CustomException("Cowardly refusing to create a new '%s' playlist since '%s' already exists." % (name, Playlist.getSaveFile(name)))
+ self.playlists[name] = Playlist.load(name)
+
+ def getPlaylists(self):
+ return self.playlists.values()
+
+class Track:
+ def __init__(self):
+ # create a track object with only empty attributes
+ for key in KEYWORDS.values():
+ setattr(self, key[0].lower(), "")
+
+ def __cmp__(self, t2):
+ return cmp(self.artist +self.album + self.title,
+ t2.artist + t2.album + t2.title)
+
+ def __repr__(self):
+ return ("%(artist)s - %(album)s - %(track)s - %(title)s" % self.__dict__).encode(DEFAULT_ENCODING)
+
+class MpdDB:
+ CACHE_FILE = None # where to save marshalled DB
+
+ def __init__(self, dbFile,
+ stickerFile = None, mpdcronStatsFile = None):
+ self.dbFile = dbFile
+ self.stickerFile = stickerFile
+ self.mpdcronStatsFile = mpdcronStatsFile
+ self.tracks = {}
+ self.__parseDB()
+ if mpdcronStatsFile:
+ self.__parseMpdcronDB()
+ elif self.stickerFile:
+ self.__parseStickerDB()
+
+ @staticmethod
+ def initStaticAttributes(cacheFile):
+ MpdDB.CACHE_FILE = cacheFile
+
+ @staticmethod
+ def load():
+ obj = loadgubbage(MpdDB.CACHE_FILE)
+ try:
+ assert isinstance(obj, MpdDB)
+ tracks = obj.getTracks()
+ if len(tracks) > 1:
+ assert isinstance(tracks[-1], Track)
+ except:
+ raise CustomException("Restoring from old cache won't work, please use -f.")
+
+ return obj
+
+ def save(self):
+ savegubbage(self, MpdDB.CACHE_FILE)
+
+ @staticmethod
+ def needUpdate(dbFile, extraFile):
+ return (not os.path.isfile(MpdDB.CACHE_FILE) \
+ or os.path.getmtime(dbFile) > os.path.getmtime(MpdDB.CACHE_FILE) \
+ or (extraFile \
+ and os.path.isfile(extraFile) \
+ and os.path.getmtime(extraFile) > os.path.getmtime(MpdDB.CACHE_FILE)))
+
+ def __parseDB(self):
+ parsing = False
+
+ track = None
+ for line in codecs.open(self.dbFile, 'r', DEFAULT_ENCODING):
+ line = line.strip()
+
+ if line == "songList begin": # enter parsing mode
+ parsing = True
+ continue
+ if line == "songList end": # exit parsing mode
+ parsing = False
+ continue
+
+ if parsing:
+ if line.startswith("key: "):
+ if track is not None: # save the previous one
+ self.tracks[track.file] = track
+ track = Track() # create a new one
+
+ key, value = line.split(": ", 1)
+ setattr(track, key.lower(), value)
+
+ def __parseStickerDB(self):
+ conn = sqlite3.connect(self.stickerFile)
+
+ curs = conn.cursor()
+
+ curs.execute('SELECT * FROM sticker WHERE type=? and name=?',
+ ("song", "rating"))
+
+ for row in curs:
+ filePath = row[1]
+ if filePath in self.tracks:
+ self.tracks[filePath].rating = row[3]
+
+ def __parseMpdcronDB(self):
+ conn = sqlite3.connect(self.mpdcronStatsFile)
+
+ curs = conn.cursor()
+
+ curs.execute('''
+SELECT song.uri, song.rating, artist.rating, album.rating, genre.rating, song.play_count
+FROM song, artist, album, genre
+WHERE song.artist = artist.name
+AND song.album = album.name
+AND song.genre = genre.name
+AND song.rating + artist.rating + album.rating + genre.rating > 0''', ())
+
+ for row in curs:
+ filePath = row[0]
+ if filePath in self.tracks:
+ self.tracks[filePath].rating = row[1]
+ self.tracks[filePath].ratingar = row[2]
+ self.tracks[filePath].ratingal = row[3]
+ self.tracks[filePath].ratingge = row[4]
+ self.tracks[filePath].playcount = row[5]
+
+ def getTracks(self):
+ return self.tracks.values()
+
+class IndentedHelpFormatterWithNL(optparse.IndentedHelpFormatter):
+ """ So optparse doesn't mangle our help description. """
+ def format_description(self, description):
+ if not description: return ""
+ desc_width = self.width - self.current_indent
+ indent = " "*self.current_indent
+ bits = description.split('\n')
+ formatted_bits = [ textwrap.fill(bit,
+ desc_width,
+ initial_indent=indent,
+ subsequent_indent=indent)
+ for bit in bits]
+ result = "\n".join(formatted_bits) + "\n"
+ return result
+
+def parseargs(args):
+ parser = optparse.OptionParser(formatter=IndentedHelpFormatterWithNL(),
+ description="""Playlist ruleset:
+ Each ruleset is made of several rules, separated by commas.
+ Each rule is made of a keyword, an operator, a value to match
+ surrounded by delimiters, and several optional flags influencing the
+ match.
+ There are """ + str(len(RuleFactory.DELIMITER_TO_RULE.keys())) + \
+ """ types of rules, each defined by a specific delimiter:\n\n""" + \
+
+ RuleFactory.help() + \
+
+ """\n These available keywords are:
+""" + \
+
+ '\n'.join([ " " + k + "/" + v[0] + " : " + v[1].lower() for k, v in KEYWORDS.iteritems() ]) + \
+
+ """
+
+ For example, a rule for all tracks by 'Fred' or 'George', which have a
+ title containing (case-insensitive) 'the' and 'and', which don't
+ include the word 'when' (case-insensitive), and whose modification
+ time was in the last 3 days would be written:
+
+ ar=/(Fred|George)/ , ti=/(the.*and|and.*the)/i , ti!/when/i , mt<%3days%
+
+ Notes:
+ Paths specified in the MPD config file containing a '~' will have the
+ '~'s replaced by the user MPD runs as.""")
+
+ parser.add_option("-f", "--force-update", dest="forceUpdate",
+ action="store_true", default=False,
+ help="Force an update of the cache file and any playlists")
+
+ parser.add_option("-C", "--cache-file", dest="cacheFile",
+ default=DEFAULT_CACHE_FILE,
+ help="Location of the cache file", metavar="FILE")
+
+ parser.add_option("-D", "--data-dir", dest="dataDir",
+ default=DEFAULT_DATA_DIR,
+ help="Location of the data directory (where we save playlist info)",
+ metavar="DIR")
+
+ parser.add_option("-d", "--database-file", dest="dbFile",
+ help="Location of the MPD database file",
+ metavar="FILE")
+
+ parser.add_option("-s", "--sticker-file", dest="stickerFile",
+ help="Location of the MPD sticker file (holding ratings)",
+ metavar="FILE")
+
+ parser.add_option("-m", "--mpdcron-stats-file", dest="mpdcronStatsFile",
+ help="Location of the mpdcron stats file (holding ratings and other info)",
+ default=None,
+ metavar="FILE")
+
+ parser.add_option("-c", "--config-file", dest="configFile",
+ default=DEFAULT_MPD_CONFIG_FILE,
+ help="Location of the MPD config file",
+ metavar="FILE")
+
+ parser.add_option("-p", "--playlist-dir", dest="playlistDirectory",
+ help="Location of the MPD playlist directory",
+ metavar="DIR")
+
+ parser.add_option("-u", "--user", dest="mpdUser",
+ help="User MPD runs as", metavar="USER")
+
+ parser.add_option("-n", "--new-playlist", dest="playlists",
+ action="append", default=[], nargs=2,
+ help="Create a new playlist",
+ metavar="NAME 'RULESET'")
+
+ parser.add_option("-o", "--output-only", dest="simpleOutput",
+ action="store_true", default=False,
+ help="Only print the final track list to STDOUT")
+
+ options, args = parser.parse_args(args)
+
+ if getattr(options, "mpdcronStatsFile") and getattr(options, "stickerFile"):
+ print "Can't use -s and -m at the same time, as they both provide ratings."
+ sys.exit(2)
+
+ # we'll use dataDir=None to indicate we want simpleOutput
+ if options.simpleOutput:
+ options.dataDir = None
+
+ # go from ((name,rule),(name1,rule1),...) to {name:rule,name1:rule1,...}
+ playlists = {}
+ for name, ruleSet in options.playlists:
+ playlists[name] = Playlist(name, ruleSet)
+ options.playlists = playlists
+
+ configDict = parsempdconf(os.path.expanduser(options.configFile),
+ options.mpdUser)
+
+ # CL arguments take precedence over config file settings
+ for key in configDict:
+ if key in dir(options) and getattr(options, key):
+ configDict[key] = getattr(options, key)
+
+ if not 'stickerFile' in configDict: # need to have this one defined
+ configDict['stickerFile'] = None
+
+ return options.forceUpdate, options.cacheFile, options.dataDir, \
+ configDict['dbFile'], configDict['stickerFile'], \
+ options.mpdcronStatsFile, \
+ configDict['playlistDirectory'], options.playlists
+
+def _underscoreToCamelCase(s):
+ tokens = s.split('_')
+ s = tokens[0]
+ for token in tokens[1:]:
+ s += token.capitalize()
+ return s
+
+# Grabbing stuff from the MPD config, a very important step
+def parsempdconf(configFile, user = None):
+ configDict = {}
+ for line in open(configFile, "r"):
+ line = line.strip()
+ if line and not re.search(r'[#{}]', line):
+ key, value = re.split(r'\s+', line, 1)
+
+ key = _underscoreToCamelCase(key)
+
+ value = re.sub(r'(^"|"$)', '', value)
+
+ # account for ~/ in mpd.conf
+ if value == '~' or value.count('~/') > 0: # FIXME: others ?
+ if user:
+ value = value.replace('~', user)
+ else:
+ value = os.path.expanduser(value)
+
+ configDict[key] = value
+
+ return configDict
+
+def savegubbage(data, path):
+ if not os.path.isdir(os.path.dirname(path)):
+ os.mkdir(os.path.dirname(path))
+ cPickle.dump(data, open(path, "wb"))
+
+def loadgubbage(path):
+ return cPickle.load(open(path, "rb"))
+
+if __name__ == '__main__':
+ try:
+ forceUpdate, cacheFile, dataDir, \
+ dbFile, stickerFile, \
+ mpdcronStatsFile, \
+ playlistDir, playlists = parseargs(sys.argv[1:])
+
+ MpdDB.initStaticAttributes(cacheFile)
+ Playlist.initStaticAttributes(playlistDir, dataDir)
+
+ playlistSet = PlaylistSet(playlists)
+
+ if not os.path.isfile(dbFile): # no dbFile -> abort
+ raise CustomException("The database file '%s' could not be found" %
+ (dbFile,))
+
+ if forceUpdate or MpdDB.needUpdate(dbFile,
+ mpdcronStatsFile or stickerFile): # update cache
+ if dataDir:
+ print "Updating database cache..."
+
+ if not os.path.isdir(os.path.dirname(cacheFile)):
+ os.mkdir(os.path.dirname(cacheFile))
+
+ # create the MPD DB object
+ if mpdcronStatsFile:
+ mpdDB = MpdDB(dbFile, mpdcronStatsFile=mpdcronStatsFile)
+ else:
+ mpdDB = MpdDB(dbFile, stickerFile=stickerFile)
+
+ mpdDB.save() # save to file
+ else: # we have a valid cache file, use it
+ if dataDir:
+ print "Loading database cache..."
+ mpdDB = MpdDB.load()
+
+ if dataDir: # add pre-existing playlists to our list
+ for name in os.listdir(Playlist.CACHE_DIR):
+ playlistSet.addMarshalled(name)
+
+ for playlist in playlistSet.getPlaylists():
+ playlist.findMatchingTracks(mpdDB)
+
+ if not dataDir: # stdout
+ if playlist.m3u:
+ print playlist.m3u.encode(DEFAULT_ENCODING)
+ else: # write to .m3u & save
+ playlist.writeM3u()
+ playlist.save()
+ except CustomException, e:
+ print e.message
+ sys.exit(2)
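Taken together, the options above make mpdspl.py a rule-driven playlist generator for MPD. As a usage sketch (playlist names and rule values are illustrative; the rule syntax is the one shown in the built-in help):

    # create or update a smart playlist from two rules and write the .m3u
    mpdspl.py -n favourites 'ar=/(Fred|George)/ , ti!/live/i'

    # only print the matching tracks to stdout, forcing a cache refresh first
    mpdspl.py -f -o -n recent 'mt<%7days%'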
diff --git a/mplayer b/mplayer
new file mode 100755
index 0000000..5ad4082
--- a/dev/null
+++ b/mplayer
@@ -0,0 +1,195 @@
+#!/bin/zsh
+HIST="$HOME/.mplayer.hist"
+AUX_INVO=0
+POSITION=0
+maxplayers=1
+network=0
+firstime=0
+randnum=$((${RANDOM}%${maxplayers}))
+mkdir -p /tmp/mplayer
+MPSOCKET="/tmp/mplayer/$randnum.fif"
+fixedvo=1
+typeset -A vo_map
+vo_map=(1 fixed-vo 0 nofixed-vo)
+
+pecho(){
+ msg="$1"
+ file="$2"
+ /bin/echo "$msg" >| $file &
+ sleep 0.1
+ echo < $file
+}
+
+# curl for non-strict url parsing ;)
+isurl () {
+ curl -I -s "$1" &>/dev/null || return 1
+ return 0
+}
+
+
+check_pattern(){
+ pecho "get_property filename" $1
+ if tail -2 /tmp/mplayer.log | /bin/grep 'ANS_filename' | /bin/grep -v grep ;then
+ pecho "get_property pause" $1
+ if tail -2 /tmp/mplayer.log | /bin/grep 'ANS_pause=no' | /bin/grep -v grep ;then
+ retval=0
+ return 0
+ fi
+ retval=2
+ fi
+ [[ -z $retval ]] && retval=1
+ return 1
+}
+
+
+parse(){
+if [[ -z "$1" ]];then
+ fnum=$(awk '{ print NR" "$0 }' $HIST | cut -d ":" -f 1 | uniq -f 1 | tac | ${=DMENU})
+ [[ -z "$fnum" ]] && exit
+ num=$(echo "$fnum" | cut -d " " -f 1)
+ FILE=$(head -${num} $HIST | tail -1 | cut -d ":" -f 1)
+
+ if [[ $fnum == *: ]];then
+ name="$(cut -d ' ' -f 1 <<< $FILE)"
+ POSITION="$(grep $name $HIST | tail -1 | cut -d ':' -f 2)"
+ fi
+
+else
+ if isurl "$(tr -d '.' <<< $1)";then # Fix /a/b/c/d type
+ FILE="$1"
+ network=1
+ else
+ if [[ "[email protected]" == /* ]];then
+ prefix=
+ else
+ prefix="$pwdir/"
+ fi
+ FILE="${prefix}[email protected]"
+ fi
+fi
+retval=$FILE
+}
+
+preplay(){
+ mpc pause &>/dev/null
+ ln -sf $MPSOCKET /tmp/mplayer.fif || exit 1
+}
+
+check_status(){
+if [[ $fixedvo == 1 ]];then
+ pidof mplayer || return 1
+ echo >>| /tmp/mplayer.log
+ retval=0
+ for ff in /tmp/mplayer/*.fif;do
+ if check_pattern $ff;then
+ return $retval
+ fi
+ done
+ return $retval
+else
+ if xdotool search --class mplayer;then
+ return 1
+ else
+ return 0
+ fi
+fi
+}
+
+aux(){
+case $1 in
+ status)
+ check_status
+ return $?
+ ;;
+ clean)
+ rm /tmp/mplayer/* 2>/dev/null
+ rm /tmp/mplayer.fif 2>/dev/null
+ killall -9 mplayer 2>/dev/null
+ rm /tmp/locks/mplayer* 2>/dev/null
+ mv /tmp/mplayer.log /tmp/.mplayer.log
+ exit
+ ;;
+ pause)
+ pecho 'pause' $MPSOCKET
+ ;;
+ snext)
+ pecho 'seek +10' $MPSOCKET
+ ;;
+ sprev)
+ pecho 'seek -10' $MPSOCKET
+ ;;
+ pnext)
+ pecho "pt_step 1" $MPSOCKET
+ ;;
+ pprev)
+ pecho "pt_step -1" $MPSOCKET
+ ;;
+ stop)
+ wheret
+ echo "$retval" >>| ~/.mplayer.hist
+ pecho 'stop' $MPSOCKET
+ echo "#STOPPED ${file}" >>| /tmp/mplayer.log
+ ;;
+ *)
+ return 1
+esac
+}
+
+wheret(){
+ pecho 'get_time_pos' $MPSOCKET
+ pecho 'get_property path' $MPSOCKET
+ sleep 3
+ LOG="tail -5 /tmp/mplayer.log"
+ position=$(${=LOG} | /bin/grep ANS_TIME_POSITION | head -1 | cut -d "=" -f 2)
+ file=$(${=LOG} | /bin/grep ANS_path | head -1 | cut -d "=" -f 2)
+ retval="$file:$position"
+}
+
+if [[ $0 == *mplayeraux ]];then
+ AUX_INVO=1
+ aux "$@"    # assumed: dispatch to the aux() helper when invoked through the mplayeraux symlink
+ exit $?
+fi
+if [[ ! -e $MPSOCKET ]] ;then
+ firstime=1
+ mkfifo $MPSOCKET
+ ln -sf $MPSOCKET /tmp/mplayer.fif
+ notify-send "Mplayer" "mplayer starting up....."
+fi
+
+exec &>>|/tmp/mplayer.log
+
+
+pwdir=$(pwd)
+
+
+pushd $HOME
+${=LOCK}/mplayer.$randnum /usr/bin/mplayer -$vo_map[$fixedvo] -msglevel all=4 -slave -idle -input file=$MPSOCKET -input conf=$HOME/.mplayer/input.conf.mine &!
+popd
+
+
+parse "$@"    # assumed: pick the file from the arguments or the history menu (sets $retval)
+FILE=$retval
+preplay
+
+[[ $firstime == 1 ]] && sleep 3
+
+pecho "loadfile \"$FILE\" 2" /tmp/mplayer.fif
+
+check_status
+
+# Queue or play
+if [[ $? == 1 ]];then
+ notify-send "Player" "^i(/home/raghavendra/.notify-icons/mplayer.xpm) ^fg(red)FILE:^fg() $FILE:t queued"
+else
+ if [[ ! $POSITION == 0* ]];then
+ sleep 1
+ pecho "set_property time_pos $POSITION" $MPSOCKET
+ notify-send "Player" "^i(/home/raghavendra/.notify-icons/mplayer.xpm) ^fg(red)FILE:^fg() $FILE:t ^fg(green)POSITION:^fg() ${POSITION:-0:0}"
+ else
+ notify-send "Player" "^i(/home/raghavendra/.notify-icons/mplayer.xpm) ^fg(red)FILE:^fg() $FILE:t"
+ fi
+fi
+/bin/echo "mplayer \"$FILE\"" >>| ~/.current
+# vim: set foldmethod=marker:
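The script above drives mplayer in slave mode through a FIFO, so the same slave commands it uses internally can be sent by hand while a player is running (paths as used by the script; replies are appended to /tmp/mplayer.log):

    echo 'pause' > /tmp/mplayer.fif               # toggle pause
    echo 'seek +10' > /tmp/mplayer.fif            # skip forward 10 seconds
    echo 'get_property path' > /tmp/mplayer.fif   # reply appears as ANS_path=... in the log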
diff --git a/mplayeraux b/mplayeraux
new file mode 120000
index 0000000..1a123cc
--- a/dev/null
+++ b/mplayeraux
@@ -0,0 +1 @@
+mplayer \ No newline at end of file
diff --git a/muxSend b/muxSend
new file mode 100755
index 0000000..c70be37
--- a/dev/null
+++ b/muxSend
@@ -0,0 +1,17 @@
+#!/bin/bash
+if (( $# < 1 ));then
+ exit
+fi
+
+data="$*"   # assumed: the command text to broadcast (used below but never assigned)
+for sess in $(tmux list-sessions | cut -d ":" -f 1);do
+ tmux set-buffer "$data"
+ for window in $(tmux list-windows -t $sess | tr -d ' ');do
+ if grep -q -E "(zsh|bash)" <<< $window;then
+ tmux paste-buffer -t $sess:${window%:*}
+ tmux send-keys -t $sess:${window%:*} "Enter"
+ fi
+ done
+done
+# vim: set ft=sh
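muxSend broadcasts its arguments as a typed command to every zsh or bash window in every tmux session, for example (the command is illustrative):

    muxSend 'cd /tmp && ls'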
diff --git a/nbookmark b/nbookmark
new file mode 100755
index 0000000..32a1ddf
--- a/dev/null
+++ b/nbookmark
@@ -0,0 +1,9 @@
+#!/bin/sh
+# this is a simple example script that demonstrates how bookmarking plugins for newsbeuter are implemented
+# (c) 2007 Andreas Krennmair
+
+url="$1"
+title="$2"
+description="$3"
+
+echo -e "${url}\t${title}\t${description}" >> ~/.local/share/newsbeuter/bookmarks
diff --git a/noter b/noter
new file mode 100755
index 0000000..99c53e2
--- a/dev/null
+++ b/noter
@@ -0,0 +1,30 @@
+#!/bin/zsh
+#ADD scrot
+# Pass arguments as is
+set -x
+root="$HOME/bin"
+title=$(xwininfo -id `xdotool getactivewindow` | tr '"' '#' | perl -lne 'if ($_ =~ m/.*#(.*)#$/){ print $1; }' | cut -d ':' -f1,2,4,5)
+case $title in
+ *firefox*|*Vimper*) bin=fnoter; args="" ;;
+ *uake*|*term*|*)
+ bin="generic";
+ pid=$(xdotool getactivewindow getwindowpid)
+ #pid=$(pgrep $title
+ args="${title//\//}:$pid"
+ #args="$title"
+ ;;
+esac
+
+if [[ -z $1 || $1 =~ a.* ]];then
+ file=$($root/noters/$bin add $args 2>/tmp/vlog || (notify-send "Error in calling $bin" && kill -9 $$))
+ [[ -z $file || ! -f $file ]] && notify-send "FATAL: $file not found" && exit 1
+ add_text="$(zenity --text 'Additional tags if any' --entry)"
+ [[ ! -z $add_text ]] && echo "CTAGS: $add_text" >> $file || exit 1
+ echo -e "====================================End Of note============================\n\n" >> $file || exit 1
+ notify-send "Note added at $file for $title"
+else
+ $root/noters/$bin "$@"
+fi
+
+
+set +x
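In short: called with no argument (or one starting with 'a') the script records a note for the currently focused window through the matching backend in ~/bin/noters/; any other arguments are handed to that backend unchanged. For example ('list' is a hypothetical backend subcommand):

    noter          # add a note for the active window
    noter list     # forwarded to the backend as-is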
diff --git a/notify-send b/notify-send
new file mode 100755
index 0000000..574f1d6
--- a/dev/null
+++ b/notify-send
@@ -0,0 +1,137 @@
+#!/bin/zsh
+maxthres=5
+thres=$maxthres
+duration=5
+
+xnotify(){
+ heading="${(Q)1}"
+ shift
+ rest="$*"   # assumed: the remaining arguments form the message body (used below but never assigned)
+ rest=${(Q)rest}
+ text=$(echo "^fg(green)$heading[1,15]: ^fg(grey85)$rest" | perl -pe 'if (!eof()) { s/\n/ | /g }' | tr -dc '[:print:]' )
+ echo "$text" | dzen2 -p $duration -xs 2 -y -1 &!
+}
+cnotify(){
+ tmux display-message "$*"
+ #muxSend a "echo $@ | head -1 | tr '\n' ' | ' | tr -dc '[:print:]' "
+}
+
+type=xnotify
+
+export DISPLAY=":0"
+pidof X &>/dev/null || type=cnotify
+
+NFILE="/dev/shm/.notify-counter"
+
+if [[ $1 == "-f" ]];then
+ shift
+ duration=2
+ $type "$@"   # assumed: show the notification immediately, bypassing the rate limiting below
+ exit
+fi
+
+if [[ ! -f $NFILE ]];then
+ temp=$(date +%s)
+ echo "$temp|shot|$maxthres" >| $NFILE
+ echo " " >> $NFILE
+ njiffy=$temp
+ mode=shot
+ time=$temp
+ difference=100
+else
+ t=$(< $NFILE)
+ temp=$t[(f)1]
+ p_subject=$t[(f)2]
+ njiffy=$temp[(ws:|:)1]
+ mode=$temp[(ws:|:)2]
+ thres=$temp[(ws:|:)3]
+ time=$(date +%s)
+ difference=$(( $time - $njiffy ))
+fi
+
+
+if [[ $mode == temper || ($difference -lt 2 && $1 == $p_subject ) ]];then
+ if [[ $thres == 0 ]];then
+ if [[ ! $mode == "temper" ]];then
+ notify-send -f "Warning" "Too many notifications - check:/tmp/notifications"
+ mode="temper"
+ fi
+
+ echo "$time|$mode|$maxthres" >| $NFILE
+ echo "$1" >> $NFILE
+ echo "$(date -R): [email protected]" >>| /tmp/notifications
+ exit
+ else
+ thres=$(( (thres-1)%$maxthres ))
+ fi
+
+elif [[ $difference -lt 10 ]];then
+ mode="batch"
+ thres=10
+ duration=2
+else
+ mode="shot"
+ duration=7
+ thres=10
+fi
+echo "$time|$mode|$thres" >| $NFILE
+echo "$1" >> $NFILE
+#set +x
+
+prgm=$0
+
+if [[ -n $1 ]];then
+ param="${(qq)1}"
+ shift
+ mlog="${(qq)@}"
+ if [[ ! -t 0 ]];then
+ while read LINE
+ do
+ mlog="$mlog ${(qq)LINE}"
+ done
+ fi
+else
+ if [[ -t 0 ]];then
+ notify-send "Warning" "Wrong input"
+ exit
+ fi
+ param="Subject"
+ tempReply=""
+ while read LINE
+ do
+ tempReply="$tempReply ${(qq)LINE}"
+ done
+ echo -e "STDIN \n$(date '+%F %H:%M') ==> $param: $tempReply\n" >>| ~/.notify-history
+ $type "$param" "$tempReply"
+ exit
+fi
+
+if [[ $prgm == *aria* ]];then
+ param="aria2c"
+fi
+
+if [[ $prgm == *beuter* ]];then
+ mlog="^i(/home/raghavendra/.notify-icons/beuter.xpm)$param $mlog"
+ param="beuter"
+fi
+
+echo -e "\n$(date '+%F %H:%M') ==> $mlog\n" >> ~/.notify-history
+
+case $param in
+ *rtorrent*)
+ $type "Torrent download" "^i(/home/raghavendra/.notify-icons/torrent.xpm)$mlog download complete!"
+ consume $HOME/.dque
+ ;;
+ *aria2c*)
+ message=$(tail -1 ~/.download_history |cut -d ' ' -f 1 | cut -d : -f 2-)
+ $type "Aria2:" "^i(/home/raghavendra/.notify-icons/download.xpm)${message:t} download complete"
+ consume $HOME/.dque
+ ;;
+ *beuter*)
+ $type "Newsbeuter" "$mlog"
+ ;;
+ *mail*)
+ $type "New Mail" "$mlog"
+ ;;
+ *) $type "$param" "$mlog"
+esac
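The wrapper accepts either a subject plus message arguments or a message on stdin, and rate-limits repeated notifications for the same subject via the counter file in /dev/shm. Typical invocations (subject and piped command are illustrative):

    notify-send "Backup" "finished without errors"     # subject + body
    make 2>&1 | tail -n 1 | notify-send "Build"        # body read from stdin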
diff --git a/notify-send-aria2 b/notify-send-aria2
new file mode 120000
index 0000000..5ec9bce
--- a/dev/null
+++ b/notify-send-aria2
@@ -0,0 +1 @@
+./notify-send \ No newline at end of file
diff --git a/notify-send-beuter b/notify-send-beuter
new file mode 120000
index 0000000..5ec9bce
--- a/dev/null
+++ b/notify-send-beuter
@@ -0,0 +1 @@
+./notify-send \ No newline at end of file
diff --git a/otv b/otv
new file mode 100755
index 0000000..8083d56
--- a/dev/null
+++ b/otv
@@ -0,0 +1,14 @@
+#!/bin/zsh
+
+type=$1
+case $type in
+*jaz*)
+ rtmpdump -v -r rtmp://livestfslivefs.fplive.net/livestfslive-live/ -y "aljazeera_en_high?videoId=747084146001&lineUpId=&pubId=665003303001&playerId=751182905001&affiliateId=" -W "http://admin.brightcove.com/viewer/us1.24.04.08.2011-01-14072625/federatedVideoUI/BrightcovePlayer.swf" -p "http://english.aljazeera.net/watch_now/" -a "aljazeeraflashlive-live?videoId=747084146001&lineUpId=&pubId=665003303001&playerId=751182905001&affiliateId=" | /usr/bin/mplayer -really-quiet -
+ ;;
+ *rus*)
+ rtmpdump -v -r rtmp://fms5.visionip.tv/live -a live -W http://rt.com/s/swf/player5.4.viral.swf -p http://rt.com/on-air/ -y RT_3 | /usr/bin/mplayer -really-quiet -
+ ;;
+ *)
+ exit 2
+ ;;
+esac
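The argument is only pattern-matched, so any string containing 'jaz' or 'rus' picks the corresponding stream:

    otv jaz       # Al Jazeera English live stream
    otv russia    # RT live stream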
diff --git a/pacman-color b/pacman-color
new file mode 100755
index 0000000..0020ff3
--- a/dev/null
+++ b/pacman-color
@@ -0,0 +1,4 @@
+#!/bin/zsh
+BIN="/usr/bin/pacman"
+[[ -x /usr/bin/pacman-color ]] && BIN="/usr/bin/pacman-color"
+exec $BIN "$@"   # assumed: hand over to the selected binary with the original arguments
diff --git a/parallel b/parallel
new file mode 100755
index 0000000..7a770dd
--- a/dev/null
+++ b/parallel
@@ -0,0 +1,5184 @@
+#!/usr/bin/perl -w
+
+=head1 NAME
+
+parallel - build and execute shell command lines from standard input in parallel
+
+=head1 SYNOPSIS
+
+B<parallel> [options] [I<command> [arguments]] < list_of_arguments
+
+B<parallel> [options] [I<command> [arguments]] B<:::> arguments
+
+B<parallel> [options] [I<command> [arguments]] B<::::> argfile(s)
+
+B<parallel> --semaphore [options] I<command>
+
+B<#!/usr/bin/parallel> --shebang [options] [I<command> [arguments]]
+
+=head1 DESCRIPTION
+
+GNU B<parallel> is a shell tool for executing jobs concurrently locally
+or using remote computers. A job is typically a single command or a
+small script that has to be run for each of the lines in the
+input. The typical input is a list of files, a list of hosts, a list
+of users, a list of URLs, or a list of tables.
+
+If you use B<xargs> today you will find GNU B<parallel> very easy to
+use as GNU B<parallel> is written to have the same options as
+B<xargs>. If you write loops in shell, you will find GNU B<parallel>
+may be able to replace most of the loops and make them run faster by
+running several jobs simultaneously. If you use B<ppss> or B<pexec> you
+will find GNU B<parallel> will often make the command easier to read.
+
+GNU B<parallel> makes sure output from the commands is the same output
+as you would get had you run the commands sequentially. This makes it
+possible to use output from GNU B<parallel> as input for other
+programs.
+
+For each line of input GNU B<parallel> will execute I<command> with
+the line as arguments. If no I<command> is given, the line of input is
+executed. Several lines will be run in parallel. GNU B<parallel> can
+often be used as a substitute for B<xargs> or B<cat | bash>.
+
+Before looking at the options you may want to check out the B<EXAMPLE>s
+after the list of options. That will give you an idea of what GNU
+B<parallel> is capable of.
+
+You can also watch the intro video for a quick introduction:
+http://www.youtube.com/watch?v=OpaiGYxkSuQ or at
+http://tinyogg.com/watch/TORaR/ and http://tinyogg.com/watch/hfxKj/
+
+=head1 OPTIONS
+
+=over 9
+
+=item I<command>
+
+Command to execute. If I<command> or the following arguments contain
+{} every instance will be substituted with the input line. Setting a
+command also invokes B<--file>.
+
+If I<command> is given, GNU B<parallel> will behave similar to B<xargs>. If
+I<command> is not given GNU B<parallel> will behave similar to B<cat | sh>.
+
+
+=item B<{}>
+
+Input line. This is the default replacement string and will normally
+be used for putting the argument in the command line. It can be
+changed with B<-I>.
+
+
+=item B<{.}>
+
+Input line without extension. This is a specialized replacement string
+with the extension removed. If the input line contains B<.> after the
+last B</> the last B<.> till the end of the string will be removed and
+B<{.}> will be replaced with the remaining. E.g. I<foo.jpg> becomes
+I<foo>, I<subdir/foo.jpg> becomes I<subdir/foo>, I<sub.dir/foo.jpg>
+becomes I<sub.dir/foo>, I<sub.dir/bar> remains I<sub.dir/bar>. If the
+input line does not contain B<.> it will remain unchanged.
+
+B<{.}> can be used the same places as B<{}>. The replacement string
+B<{.}> can be changed with B<-U>.
+
+
+=item B<{/}> (unimplemented)
+
+Basename of input line. This is a specialized replacement string
+with the directory part removed.
+
+B<{/}> can be used the same places as B<{}>. The replacement string
+B<{/}> can be changed with B<--basenamereplace>.
+
+
+=item B<{/.}> (unimplemented)
+
+Basename of input line without extension. This is a specialized
+replacement string with the directory and extension part removed. It
+is a combination of B<{/}> and B<{.}>.
+
+B<{/.}> can be used the same places as B<{}>. The replacement string
+B<{/.}> can be changed with B<--basenameextensionreplace>.
+
+
+=item B<{>I<n>B<}>
+
+Argument from argument file I<n> or the I<n>'th argument. See B<-a>
+and B<-N>.
+
+B<{>I<n>B<}> can be used the same places as B<{}>.
+
+
+=item B<{>I<n>.B<}>
+
+Argument from argument file I<n> or the I<n>'th argument without
+extension. It is a combination of B<{>I<n>B<}> and B<{.}>.
+
+B<{>I<n>.B<}> can be used the same places as B<{>I<n>B<}>.
+
+
+=item B<{>I<n>/B<}> (unimplemented)
+
+Basename of argument from argument file I<n> or the I<n>'th argument.
+It is a combination of B<{>I<n>B<}> and B<{/}>. See B<-a> and B<-N>.
+
+B<{>I<n>/B<}> can be used the same places as B<{>I<n>B<}>.
+
+
+=item B<{>I<n>/.B<}> (unimplemented)
+
+Basename of argument from argument file I<n> or the I<n>'th argument
+without extension. It is a combination of B<{>I<n>B<}>, B<{/}>, and
+B<{.}>. See B<-a> and B<-N>.
+
+B<{>I<n>/.B<}> can be used the same places as B<{>I<n>B<}>.
+
+
+
+=item B<:::> I<arguments>
+
+Use arguments from the command line as input instead of from stdin
+(standard input). Unlike other options for GNU B<parallel> B<:::> is
+placed after the I<command> and before the arguments.
+
+The following are equivalent:
+
+ (echo file1; echo file2) | parallel gzip
+ parallel gzip ::: file1 file2
+ parallel gzip {} ::: file1 file2
+ parallel --arg-sep ,, gzip {} ,, file1 file2
+ parallel --arg-sep ,, gzip ,, file1 file2
+ parallel ::: "gzip file1" "gzip file2"
+
+To avoid treating B<:::> as special use B<--arg-sep> to set the
+argument separator to something else. See also B<--arg-sep>.
+
+stdin (standard input) will be passed to the first process run.
+
+If B<--arg-file> is set arguments from that file will be appended.
+
+
+=item B<::::> I<argfiles>
+
+Another way to write B<-a> I<argfile1> B<-a> I<argfile2> ...
+
+See B<-a>.
+
+
+=item B<--null>
+
+=item B<-0>
+
+Use NUL as delimiter. Normally input lines will end in \n
+(newline). If they end in \0 (NUL), then use this option. It is useful
+for processing arguments that may contain \n (newline).
+
+
+=item B<--arg-file> I<input-file>
+
+=item B<-a> I<input-file>
+
+Read items from the file I<input-file> instead of stdin (standard input). If
+you use this option, stdin is given to the first process run.
+Otherwise, stdin is redirected from /dev/null.
+
+If multiple B<-a> are given, one line will be read from each of the
+files. The arguments can be accessed in the command as B<{1}>
+.. B<{>I<n>B<}>, so B<{1}> will be a line from the first file, and
+B<{6}> will refer to the line with the same line number from the 6th
+file.
+
+
+=item B<--arg-file-sep> I<sep-str> (beta testing)
+
+Use I<sep-str> instead of B<::::> as separator string between command
+and argument files. Useful if B<::::> is used for something else by the
+command.
+
+See also: B<::::>.
+
+
+=item B<--arg-sep> I<sep-str> (beta testing)
+
+Use I<sep-str> instead of B<:::> as separator string. Useful if B<:::>
+is used for something else by the command.
+
+Also useful if your command uses B<:::> but you still want to read
+arguments from stdin (standard input): Simply change B<--arg-sep> to a
+string that is not in the command line.
+
+See also: B<:::>.
+
+
+=item B<--basefile> I<file>
+
+=item B<-B> I<file>
+
+I<file> will be transferred to each sshlogin before a job is
+started. It will be removed if B<--cleanup> is active. The file may be
+a script to run or some common base data needed for the jobs.
+Multiple B<-B> can be specified to transfer more basefiles. The
+I<file> will be transferred the same way as B<--transfer>.
+
+
+=item B<--basenamereplace> I<replace-str> (unimplemented)
+
+Use the replacement string I<replace-str> instead of B<{/}> for basename of input line.
+
+
+=item B<--basenameextensionreplace> I<replace-str> (unimplemented)
+
+Use the replacement string I<replace-str> instead of B<{/.}> for basename of input line without extension.
+
+
+=item B<--bg> (beta testing)
+
+Run command in background thus GNU B<parallel> will not wait for
+completion of the command before exiting. This is the default if
+B<--semaphore> is set.
+
+See also: B<--fg>
+
+Implies B<--semaphore>.
+
+
+=item B<--cleanup>
+
+Remove transferred files. B<--cleanup> will remove the transferred files
+on the remote server after processing is done.
+
+ find log -name '*gz' | parallel \
+ --sshlogin server.example.com --transfer --return {.}.bz2 \
+ --cleanup "zcat {} | bzip -9 >{.}.bz2"
+
+With B<--transfer> the file transferred to the remote server will be
+removed on the remote server. Directories created will not be removed
+- even if they are empty.
+
+With B<--return> the file transferred from the remote server will be
+removed on the remote server. Directories created will not be removed
+- even if they are empty.
+
+B<--cleanup> is ignored when not used with B<--transfer> or B<--return>.
+
+
+=item B<--colsep> I<regexp> (beta testing)
+
+=item B<-C> I<regexp> (beta testing)
+
+Column separator. The input will be treated as a table with I<regexp>
+separating the columns. The n'th column can be accessed using
+B<{>I<n>B<}> or B<{>I<n>.B<}>. E.g. B<{3}> is the 3rd column.
+
+B<--colsep> implies B<--trim rl>.
+
+I<regexp> is a Perl Regular Expression:
+http://perldoc.perl.org/perlre.html
+
+
+=item B<--command>
+
+=item B<-c> (Use B<--command> as B<-c> may be removed in later versions)
+
+Line is a command. The input line contains more than one argument or
+the input line needs to be evaluated by the shell. This is the default
+if I<command> is not set. Can be reversed with B<--file>.
+
+Most people will never need this because GNU B<parallel> normally
+selects the correct B<--file> or B<--command>.
+
+
+=item B<--delimiter> I<delim>
+
+=item B<-d> I<delim>
+
+Input items are terminated by the specified character. Quotes and
+backslash are not special; every character in the input is taken
+literally. Disables the end-of-file string, which is treated like any
+other argument. This can be used when the input consists of simply
+newline-separated items, although it is almost always better to design
+your program to use --null where this is possible. The specified
+delimiter may be a single character, a C-style character escape such
+as \n, or an octal or hexadecimal escape code. Octal and
+hexadecimal escape codes are understood as for the printf command.
+Multibyte characters are not supported.
+
+=item B<-E> I<eof-str>
+
+Set the end of file string to eof-str. If the end of file string
+occurs as a line of input, the rest of the input is ignored. If
+neither B<-E> nor B<-e> is used, no end of file string is used.
+
+
+=item B<--eof>[=I<eof-str>]
+
+=item B<-e>[I<eof-str>]
+
+This option is a synonym for the B<-E> option. Use B<-E> instead,
+because it is POSIX compliant for B<xargs> while this option is not.
+If I<eof-str> is omitted, there is no end of file string. If neither
+B<-E> nor B<-e> is used, no end of file string is used.
+
+
+=item B<--eta>
+
+Show the estimated number of seconds before finishing. This forces GNU
+B<parallel> to read all jobs before starting to find the number of
+jobs. GNU B<parallel> normally only reads the next job to run.
+Implies B<--progress>.
+
+
+=item B<--fg> (beta testing)
+
+Run command in foreground thus GNU B<parallel> will wait for
+completion of the command before exiting.
+
+See also: B<--bg>
+
+Implies B<--semaphore>.
+
+
+=item B<--file>
+
+=item B<-f> (Use B<--file> as B<-f> may be removed in later versions)
+
+Line is a filename. The input line contains a filename that will be
+quoted so it is not evaluated by the shell. This is the default if
+I<command> is set. Can be reversed with B<--command>.
+
+Most people will never need this because GNU B<parallel> normally
+selects the correct B<--file> or B<--command>.
+
+
+=item B<--group>
+
+=item B<-g>
+
+Group output. Output from each job is grouped together and is only
+printed when the command is finished. STDERR first followed by STDOUT.
+B<-g> is the default. Can be reversed with B<-u>.
+
+=item B<--help>
+
+=item B<-h>
+
+Print a summary of the options to GNU B<parallel> and exit.
+
+
+=item B<--halt-on-error> <0|1|2>
+
+=item B<-H> <0|1|2>
+
+=over 3
+
+=item 0
+
+Do not halt if a job fails. Exit status will be the number of jobs
+failed. This is the default.
+
+=item 1
+
+Do not start new jobs if a job fails, but complete the running jobs
+including cleanup. The exit status will be the exit status from the
+last failing job.
+
+=item 2
+
+Kill off all jobs immediately and exit without cleanup. The exit
+status will be the exit status from the failing job.
+
+=back
+
+
+=item B<-I> I<replace-str>
+
+Use the replacement string I<replace-str> instead of {}.
+
+
+=item B<--replace>[=I<replace-str>]
+
+=item B<-i>[I<replace-str>]
+
+This option is a synonym for B<-I>I<replace-str> if I<replace-str> is
+specified, and for B<-I>{} otherwise. This option is deprecated;
+use B<-I> instead.
+
+
+=item B<--jobs> I<N>
+
+=item B<-j> I<N>
+
+=item B<--max-procs> I<N>
+
+=item B<-P> I<N>
+
+Run up to N jobs in parallel. 0 means as many as possible. Default is
+9.
+
+If B<--semaphore> is set default is 1 thus making a mutex.
+
+
+=item B<--jobs> I<+N>
+
+=item B<-j> I<+N>
+
+=item B<--max-procs> I<+N>
+
+=item B<-P> I<+N>
+
+Add N to the number of CPU cores. Run this many jobs in parallel. For
+compute intensive jobs B<-j> +0 is useful as it will run
+number-of-cpu-cores jobs simultaneously. See also
+B<--use-cpus-instead-of-cores>.
+
+
+=item B<--jobs> I<-N>
+
+=item B<-j> I<-N>
+
+=item B<--max-procs> I<-N>
+
+=item B<-P> I<-N>
+
+Subtract N from the number of CPU cores. Run this many jobs in parallel.
+If the evaluated number is less than 1 then 1 will be used. See also
+B<--use-cpus-instead-of-cores>.
+
+
+=item B<--jobs> I<N>%
+
+=item B<-j> I<N>%
+
+=item B<--max-procs> I<N>%
+
+=item B<-P> I<N>%
+
+Multiply N% with the number of CPU cores. Run this many jobs in parallel.
+If the evaluated number is less than 1 then 1 will be used. See also
+B<--use-cpus-instead-of-cores>.
+
+
+=item B<--jobs> I<procfile> (beta test)
+
+=item B<-j> I<procfile> (beta test)
+
+=item B<--max-procs> I<procfile> (beta test)
+
+=item B<-P> I<procfile> (beta test)
+
+Read parameter from file. Use the content of I<procfile> as parameter
+for I<-j>. E.g. I<procfile> could contain the string 100% or +2 or
+10. If I<procfile> is changed when a job completes, I<procfile> is
+read again and the new number of jobs is computed. If the number is
+lower than before, running jobs will be allowed to finish but new jobs
+will not be started until the wanted number of jobs has been reached.
+This makes it possible to change the number of simultaneous running
+jobs while GNU B<parallel> is running.
+
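+For example (the file name is chosen for illustration) the number of
+simultaneous jobs can be changed while GNU B<parallel> is running:
+
+  echo 8 > /tmp/numjobs
+  find . -name '*.log' | parallel -j /tmp/numjobs gzip
+
+and then, from another shell, while the jobs are still running:
+
+  echo 2 > /tmp/numjobs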
+
+=item B<--keeporder>
+
+=item B<-k>
+
+Keep the sequence of output the same as the order of input. If jobs 1 2 3 4
+end in the sequence 3 1 4 2 the output will still be 1 2 3 4.
+
+
+=item B<-L> I<max-lines>
+
+Use at most I<max-lines> nonblank input lines per command line.
+Trailing blanks cause an input line to be logically continued on the
+next input line.
+
+Implies B<-X> unless B<-m> is set.
+
+
+=item B<--max-lines>[=I<max-lines>]
+
+=item B<-l>[I<max-lines>]
+
+Synonym for the B<-L> option. Unlike B<-L>, the I<max-lines> argument
+is optional. If I<max-lines> is not specified, it defaults to one.
+The B<-l> option is deprecated since the POSIX standard specifies
+B<-L> instead.
+
+Implies B<-X> unless B<-m> is set.
+
+
+=item B<--controlmaster> (experimental)
+
+=item B<-M> (experimental)
+
+Use ssh's ControlMaster to make ssh connections faster. Useful if jobs
+run remote and are very fast to run. This is disabled for sshlogins
+that specify their own ssh command.
+
+
+=item B<--xargs>
+
+=item B<-m>
+
+Multiple. Insert as many arguments as the command line length
+permits. If B<{}> is not used the arguments will be appended to the
+line. If B<{}> is used multiple times each B<{}> will be replaced
+with all the arguments.
+
+Support for B<-m> with B<--sshlogin> is limited and may fail.
+
+See also B<-X> for context replace. If in doubt use B<-X> as that will
+most likely do what is needed.
+
+
+=item B<--progress>
+
+Show progress of computations. List the computers involved in the task
+with number of CPU cores detected and the max number of jobs to
+run. After that show progress for each computer: number of running
+jobs, number of completed jobs, and percentage of all jobs done by
+this computer. The percentage will only be available after all jobs
+have been scheduled as GNU B<parallel> only reads the next job when
+ready to schedule it - this is to avoid wasting time and memory by
+reading everything at startup.
+
+By sending GNU B<parallel> SIGUSR2 you can toggle turning on/off
+B<--progress> on a running GNU B<parallel> process.
+
+
+=item B<--max-args>=I<max-args>
+
+=item B<-n> I<max-args>
+
+Use at most I<max-args> arguments per command line. Fewer than
+I<max-args> arguments will be used if the size (see the B<-s> option)
+is exceeded, unless the B<-x> option is given, in which case
+GNU B<parallel> will exit.
+
+Implies B<-X> unless B<-m> is set.
+
+
+=item B<--max-replace-args>=I<max-args> (beta test)
+
+=item B<-N> I<max-args> (beta test)
+
+Use at most I<max-args> arguments per command line. Like B<-n> but
+also makes replacement strings B<{1}> .. B<{>I<max-args>B<}> that
+represent argument 1 .. I<max-args>. If there are too few arguments the
+B<{>I<n>B<}> will be empty.
+
+This will set the owner of the homedir to the user:
+
+B<tr ':' '\012' < /etc/passwd | parallel -N7 chown {1} {6}>
+
+Implies B<-X> unless B<-m> is set.
+
+
+=item B<--max-line-length-allowed>
+
+Print the maximal number of characters allowed on the command line and
+exit (used by GNU B<parallel> itself to determine the line length
+on remote computers).
+
+
+=item B<--number-of-cpus>
+
+Print the number of physical CPUs and exit (used by GNU B<parallel>
+itself to determine the number of physical CPUs on remote computers).
+
+
+=item B<--number-of-cores>
+
+Print the number of CPU cores and exit (used by GNU B<parallel> itself
+to determine the number of CPU cores on remote computers).
+
+
+=item B<--interactive>
+
+=item B<-p>
+
+Prompt the user about whether to run each command line and read a line
+from the terminal. Only run the command line if the response starts
+with 'y' or 'Y'. Implies B<-t>.
+
+
+=item B<--profile> I<profilename>
+
+=item B<-J> I<profilename>
+
+Use profile I<profilename> for options. This is useful if you want to
+have multiple profiles. You could have one profile for running jobs in
+parallel on the local machine and a different profile for running jobs
+on remote machines. See the section PROFILE FILES for examples.
+
+I<profilename> corresponds to the file ~/.parallel/I<profilename>.
+
+Default: config
+
+=item B<--quote>
+
+=item B<-q>
+
+Quote I<command>. This will quote the command line so special
+characters are not interpreted by the shell. See the section
+QUOTING. Most people will never need this. Quoting is disabled by
+default.
+
+
+=item B<--no-run-if-empty>
+
+=item B<-r>
+
+If the stdin (standard input) only contains whitespace, do not run the command.
+
+
+=item B<--retries> I<n>
+
+If a job fails, retry it on another computer. Do this I<n> times. If
+there are fewer than I<n> computers in B<--sshlogin> GNU parallel will
+re-use the computers. This is useful if some jobs fail for no apparent
+reason (such as network failure).
+
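+For example (servers and command are illustrative):
+
+  seq 1 100 | parallel --retries 3 \
+    --sshlogin server1.example.com,server2.example.com do_stuff {}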
+
+=item B<--return> I<filename>
+
+Transfer files from remote servers. B<--return> is used with
+B<--sshlogin> when the arguments are files on the remote servers. When
+processing is done the file I<filename> will be transferred
+from the remote server using B<rsync> and will be put relative to
+the default login dir. E.g.
+
+ echo foo/bar.txt | parallel \
+ --sshlogin server.example.com --return {.}.out touch {.}.out
+
+This will transfer the file I<$HOME/foo/bar.out> from the server
+I<server.example.com> to the file I<foo/bar.out> after running
+B<touch foo/bar.out> on I<server.example.com>.
+
+ echo /tmp/foo/bar.txt | parallel \
+ --sshlogin server.example.com --return {.}.out touch {.}.out
+
+This will transfer the file I</tmp/foo/bar.out> from the server
+I<server.example.com> to the file I</tmp/foo/bar.out> after running
+B<touch /tmp/foo/bar.out> on I<server.example.com>.
+
+Multiple files can be transferred by repeating the options multiple
+times:
+
+ echo /tmp/foo/bar.txt | \
+ parallel --sshlogin server.example.com \
+ --return {.}.out --return {.}.out2 touch {.}.out {.}.out2
+
+B<--return> is often used with B<--transfer> and B<--cleanup>.
+
+B<--return> is ignored when used with B<--sshlogin :> or when not used
+with B<--sshlogin>.
+
+
+=item B<--max-chars>=I<max-chars>
+
+=item B<-s> I<max-chars>
+
+Use at most I<max-chars> characters per command line, including the
+command and initial-arguments and the terminating nulls at the ends of
+the argument strings. The largest allowed value is system-dependent,
+and is calculated as the argument length limit for exec, less the size
+of your environment. The default value is the maximum.
+
+Implies B<-X> unless B<-m> is set.
+
+
+=item B<--show-limits>
+
+Display the limits on the command-line length which are imposed by the
+operating system and the B<-s> option. Pipe the input from /dev/null
+(and perhaps specify --no-run-if-empty) if you don't want GNU B<parallel>
+to do anything.
+
+
+=item B<--semaphore> (beta testing)
+
+Work as a counting semaphore. B<--semaphore> will cause GNU
+B<parallel> to start I<command> in the background. When the number of
+simultaneous jobs is reached, GNU B<parallel> will wait for one of
+these to complete before starting another command.
+
+B<--semaphore> implies B<--bg> unless B<--fg> is specified.
+
+B<--semaphore> implies B<--semaphorename `tty`> unless
+B<--semaphorename> is specified.
+
+Used with B<--fg>, B<--wait>, and B<--semaphorename>.
+
+The command B<sem> is an alias for B<parallel --semaphore>.
+
+
+=item B<--semaphorename> I<name> (beta testing)
+
+=item B<--id> I<name>
+
+The name of the semaphore to use. The semaphore can be shared between
+multiple processes.
+
+Implies B<--semaphore>.
+
+
+=item B<--semaphoretimeout> I<secs> (not implemented)
+
+If the semaphore is not released within secs seconds, take it anyway.
+
+Implies B<--semaphore>.
+
+
+=item B<--skip-first-line>
+
+Do not use the first line of input (used by GNU B<parallel> itself
+when called with B<--shebang>).
+
+
+=item B<-S> I<[ncpu/]sshlogin[,[ncpu/]sshlogin[,...]]>
+
+=item B<--sshlogin> I<[ncpu/]sshlogin[,[ncpu/]sshlogin[,...]]>
+
+Distribute jobs to remote servers. The jobs will be run on a list of
+remote servers. GNU B<parallel> will determine the number of CPU
+cores on the remote servers and run the number of jobs as specified by
+B<-j>. If the number I<ncpu> is given GNU B<parallel> will use this
+number for number of CPU cores on the host. Normally I<ncpu> will not
+be needed.
+
+An I<sshlogin> is of the form:
+
+ [sshcommand [options]][username@]hostname
+
+The sshlogin must not require a password.
+
+The sshlogin ':' is special, it means 'no ssh' and will therefore run
+on the local computer.
+
+The sshlogin '..' is special, it reads sshlogins from ~/.parallel/sshloginfile
+
+To specify more sshlogins separate the sshlogins by comma or repeat
+the options multiple times.
+
+For examples: see B<--sshloginfile>.
+
+The remote host must have GNU B<parallel> installed.
+
+B<--sshlogin> is known to cause problems with B<-m> and B<-X>.
+
+
+=item B<--sshloginfile> I<filename>
+
+File with sshlogins. The file consists of sshlogins on separate
+lines. Empty lines and lines starting with '#' are ignored. Example:
+
+ server.example.com
+ 8/my-8-core-server.example.com
+ # This server has SSH running on port 2222
+ ssh -p 2222 server.example.net
+ 4/ssh -p 2222 quadserver.example.net
+ # Use a different ssh program
+ myssh -p 2222 -l myusername hexacpu.example.net
+ # Use a different ssh program with default number of cores
+ //usr/local/bin/myssh -p 2222 -l myusername hexacpu.example.net
+ # Use a different ssh program with 6 cores
+ 6//usr/local/bin/myssh -p 2222 -l myusername hexacpu.example.net
+ # Assume 16 cores on the local computer
+ 16/:
+
+When using a different ssh program the last argument must be the hostname.
+
+The sshloginfile '..' is special, it reads sshlogins from
+~/.parallel/sshloginfile
+
+
+=item B<--silent>
+
+Silent. The job to be run will not be printed. This is the default.
+Can be reversed with B<-v>.
+
+
+=item B<--verbose>
+
+=item B<-t>
+
+Print the command line on the standard error output before executing
+it.
+
+See also B<-v> and B<-p>.
+
+
+=item B<--transfer>
+
+Transfer files to remote servers. B<--transfer> is used with
+B<--sshlogin> when the arguments are files and should be transferred to
+the remote servers. The files will be transferred using B<rsync> and
+will be put relative to the default login dir. E.g.
+
+ echo foo/bar.txt | parallel \
+ --sshlogin server.example.com --transfer wc
+
+This will transfer the file I<foo/bar.txt> to the server
+I<server.example.com> to the file I<$HOME/foo/bar.txt> before running
+B<wc foo/bar.txt> on I<server.example.com>.
+
+ echo /tmp/foo/bar.txt | parallel \
+ --sshlogin server.example.com --transfer wc
+
+This will transfer the file I<foo/bar.txt> to the server
+I<server.example.com> to the file I</tmp/foo/bar.txt> before running
+B<wc /tmp/foo/bar.txt> on I<server.example.com>.
+
+B<--transfer> is often used with B<--return> and B<--cleanup>.
+
+B<--transfer> is ignored when used with B<--sshlogin :> or when not used with B<--sshlogin>.
+
+
+=item B<--trc> I<filename>
+
+Transfer, Return, Cleanup. Short hand for:
+
+B<--transfer> B<--return> I<filename> B<--cleanup>
+
+
+=item B<--trim> <n|l|r|lr|rl> (beta testing)
+
+Trim white space in input.
+
+=over 4
+
+=item n
+
+No trim. Input is not modified. This is the default.
+
+=item l
+
+Left trim. Remove white space from start of input. E.g. " a bc " -> "a bc ".
+
+=item r
+
+Right trim. Remove white space from end of input. E.g. " a bc " -> " a bc".
+
+=item lr
+
+=item rl
+
+Both trim. Remove white space from both start and end of input. E.g. "
+a bc " -> "a bc". This is the default if B<--colsep> is used.
+
+=back
+
+
+=item B<--ungroup>
+
+=item B<-u>
+
+Ungroup output. Output is printed as soon as possible. This may cause
+output from different commands to be mixed. GNU B<parallel> runs
+faster with B<-u>. Can be reversed with B<-g>.
+
+
+=item B<--extensionreplace> I<replace-str>
+
+=item B<-U> I<replace-str>
+
+Use the replacement string I<replace-str> instead of {.} for input line without extension.
+
+
+=item B<--use-cpus-instead-of-cores>
+
+Count the number of physical CPUs instead of CPU cores. When computing
+how many jobs to run simultaneously relative to the number of CPU cores
+you can ask GNU B<parallel> to instead look at the number of physical
+CPUs. This will make sense for computers that have hyperthreading as
+two jobs running on one CPU with hyperthreading will run slower than
+two jobs running on two physical CPUs. Some multi-core CPUs can run
+faster if only one thread is running per physical CPU. Most users will
+not need this option.
+
+
+=item B<-v>
+
+Verbose. Print the job to be run on STDOUT. Can be reversed with
+B<--silent>. See also B<-t>.
+
+Use B<-v> B<-v> to print the wrapping ssh command when running remotely.
+
+
+=item B<--version>
+
+=item B<-V>
+
+Print the version GNU B<parallel> and exit.
+
+
+=item B<--workdir> I<mydir> (beta testing)
+
+=item B<-W> I<mydir> (beta testing)
+
+Files transferred using B<--transfer> and B<--return> will be relative
+to I<mydir> on remote machines, and the command will be executed in
+that dir. The special workdir B<...> will create a workdir in
+B<~/.parallel/tmp/> on the remote machines and will be removed if
+using B<--cleanup>.
+
+
+=item B<--wait> (beta testing)
+
+Wait for all commands to complete.
+
+Implies B<--semaphore>.
+
+
+=item B<-X>
+
+Multiple arguments with context replace. Insert as many arguments as
+the command line length permits. If B<{}> is not used the arguments
+will be appended to the line. If B<{}> is used as part of a word
+(like I<pic{}.jpg>) then the whole word will be repeated. If B<{}> is
+used multiple times each B<{}> will be replaced with the arguments.
+
+Normally B<-X> will do the right thing, whereas B<-m> can give
+unexpected results if B<{}> is used as part of a word.
+
+Support for B<-X> with B<--sshlogin> is limited and may fail.
+
+See also B<-m>.
+
+
+=item B<--exit>
+
+=item B<-x>
+
+Exit if the size (see the B<-s> option) is exceeded.
+
+
+=item B<--shebang>
+
+=item B<--hashbang>
+
+=item B<-Y>
+
+GNU B<Parallel> can be called as a shebang (#!) command as the first line of a script. Like this:
+
+ #!/usr/bin/parallel -Yr traceroute
+
+ foss.org.my
+ debian.org
+ freenetproject.org
+
+For this to work B<--shebang> or B<-Y> must be set as the first option.
+
+
+=back
+
+=head1 EXAMPLE: Working as xargs -n1. Argument appending
+
+GNU B<parallel> can work similar to B<xargs -n1>.
+
+To compress all html files using B<gzip> run:
+
+B<find . -name '*.html' | parallel gzip>
+
+If the file names may contain a newline use B<-0>. Substitute FOO BAR with
+FUBAR in all files in this dir and subdirs:
+
+B<find . -type f -print0 | parallel -q0 perl -i -pe 's/FOO BAR/FUBAR/g'>
+
+Note B<-q> is needed because of the space in 'FOO BAR'.
+
+
+=head1 EXAMPLE: Reading arguments from command line
+
+GNU B<parallel> can take the arguments from command line instead of
+stdin (standard input). To compress all html files in the current dir
+using B<gzip> run:
+
+B<parallel gzip ::: *.html>
+
+To convert *.wav to *.mp3 using LAME running one process per CPU core
+run:
+
+B<parallel -j+0 lame {} -o {.}.mp3 ::: *.wav>
+
+
+=head1 EXAMPLE: Inserting multiple arguments
+
+When moving a lot of files like this: B<mv * destdir> you will
+sometimes get the error:
+
+B<bash: /bin/mv: Argument list too long>
+
+because there are too many files. You can instead do:
+
+B<ls | parallel mv {} destdir>
+
+This will run B<mv> for each file. It can be done faster if B<mv> gets
+as many arguments as will fit on the line:
+
+B<ls | parallel -m mv {} destdir>
+
+
+=head1 EXAMPLE: Context replace
+
+To remove the files I<pict0000.jpg> .. I<pict9999.jpg> you could do:
+
+B<seq -w 0 9999 | parallel rm pict{}.jpg>
+
+You could also do:
+
+B<seq -w 0 9999 | perl -pe 's/(.*)/pict$1.jpg/' | parallel -m rm>
+
+The first will run B<rm> 10000 times, while the last will only run
+B<rm> as many times as needed to keep the command line length short
+enough to avoid B<Argument list too long> (it typically runs 1-2 times).
+
+You could also run:
+
+B<seq -w 0 9999 | parallel -X rm pict{}.jpg>
+
+This will also only run B<rm> as many times as needed to keep the command
+line length short enough.
+
+
+=head1 EXAMPLE: Compute intensive jobs and substitution
+
+If ImageMagick is installed this will generate a thumbnail of a jpg
+file:
+
+B<convert -geometry 120 foo.jpg thumb_foo.jpg>
+
+If the system has more than 1 CPU core it can be run with
+number-of-cpu-cores jobs in parallel (B<-j> +0). This will do that for
+all jpg files in a directory:
+
+B<ls *.jpg | parallel -j +0 convert -geometry 120 {} thumb_{}>
+
+To do it recursively use B<find>:
+
+B<find . -name '*.jpg' | parallel -j +0 convert -geometry 120 {} {}_thumb.jpg>
+
+Notice how the argument has to start with B<{}> as B<{}> will include path
+(e.g. running B<convert -geometry 120 ./foo/bar.jpg
+thumb_./foo/bar.jpg> would clearly be wrong). The command will
+generate files like ./foo/bar.jpg_thumb.jpg.
+
+Use B<{.}> to avoid the extra .jpg in the file name. This command will
+make files like ./foo/bar_thumb.jpg:
+
+B<find . -name '*.jpg' | parallel -j +0 convert -geometry 120 {} {.}_thumb.jpg>
+
+
+=head1 EXAMPLE: Substitution and redirection
+
+This will generate an uncompressed version of .gz-files next to the .gz-file:
+
+B<parallel zcat {} ">>B<"{.} ::: *.gz>
+
+Quoting of > is necessary to postpone the redirection. Another
+solution is to quote the whole command:
+
+B<parallel "zcat {} >>B<{.}" ::: *.gz>
+
+Other special shell characters (such as * ; $ > < | >> <<) also need
+to be put in quotes, as they may otherwise be interpreted by the shell
+and not given to GNU B<parallel>.
+
+=head1 EXAMPLE: Composed commands
+
+A job can consist of several commands. This will print the number of
+files in each directory:
+
+B<ls | parallel 'echo -n {}" "; ls {}|wc -l'>
+
+To put the output in a file called <name>.dir:
+
+B<ls | parallel '(echo -n {}" "; ls {}|wc -l) >> B<{}.dir'>
+
+Even small shell scripts can be run by GNU B<parallel>:
+
+B<find . | parallel 'a={}; name=${a##*/}; upper=$(echo "$name" | tr "[:lower:]" "[:upper:]"); echo "$name - $upper"'>
+
+Given a list of URLs, list all URLs that fail to download. Print the
+line number and the URL.
+
+B<cat urlfile | parallel "wget {} 2>>B</dev/null || grep -n {} urlfile">
+
+
+=head1 EXAMPLE: Removing file extension when processing files
+
+When processing files removing the file extension using B<{.}> is
+often useful.
+
+Create a directory for each zip-file and unzip it in that dir:
+
+B<parallel 'mkdir {.}; cd {.}; unzip ../{}' ::: *.zip>
+
+Recompress all .gz files in current directory using B<bzip2> running 1
+job per CPU core in parallel:
+
+B<parallel -j+0 "zcat {} | bzip2 >>B<{.}.bz2 && rm {}" ::: *.gz>
+
+Convert all WAV files to MP3 using LAME:
+
+B<find sounddir -type f -name '*.wav' | parallel -j+0 lame {} -o {.}.mp3>
+
+
+=head1 EXAMPLE: Removing two file extensions when processing files and
+calling GNU Parallel from itself
+
+If you have directory with tar.gz files and want these extracted in
+the corresponding dir (e.g. foo.tar.gz will be extracted in the dir
+foo) you can do:
+
+B<ls *.tar.gz| parallel -U {tar} 'echo {tar}|parallel "mkdir -p {.} ; tar -C {.} -xf {.}.tar.gz"'>
+
+=head1 EXAMPLE: Download 10 images for each of the past 30 days
+
+Let us assume a website stores images like:
+
+ http://www.example.com/path/to/YYYYMMDD_##.jpg
+
+where YYYYMMDD is the date and ## is the number 01-10. This will
+generate the past 30 days as YYYYMMDD:
+
+B<seq 1 30 | parallel date -d '"today -{} days"' +%Y%m%d>
+
+Based on this we can let GNU B<parallel> generate 10 B<wget>s per day:
+
+I<the above> B<| parallel -I {o} seq -w 1 10 "|" parallel wget
+http://www.example.com/path/to/{o}_{}.jpg>
+
+=head1 EXAMPLE: Rewriting a for-loop and a while-loop
+
+for-loops like this:
+
+ (for x in `cat list` ; do
+ do_something $x
+ done) | process_output
+
+and while-loops like this:
+
+ cat list | (while read x ; do
+ do_something $x
+ done) | process_output
+
+can be written like this:
+
+B<cat list | parallel do_something | process_output>
+
+If the processing requires more steps the for-loop like this:
+
+ (for x in `cat list` ; do
+ no_extension=${x%.*};
+ do_something $x scale $no_extension.jpg
+ do_step2 <$x $no_extension
+ done) | process_output
+
+and while-loops like this:
+
+ cat list | (while read x ; do
+ no_extension=${x%.*};
+ do_something $x scale $no_extension.jpg
+ do_step2 <$x $no_extension
+ done) | process_output
+
+can be written like this:
+
+B<cat list | parallel "do_something {} scale {.}.jpg ; do_step2 <{} {.}" | process_output>
+
+
+=head1 EXAMPLE: Group output lines
+
+When running jobs that output data, you often do not want the output
+of multiple jobs to run together. GNU B<parallel> defaults to grouping the
+output of each job, so the output is printed when the job finishes. If
+you want the output to be printed while the job is running you can use
+B<-u>.
+
+Compare the output of:
+
+B<parallel traceroute ::: foss.org.my debian.org freenetproject.org>
+
+to the output of:
+
+B<parallel -u traceroute ::: foss.org.my debian.org freenetproject.org>
+
+
+=head1 EXAMPLE: Keep order of output same as order of input
+
+Normally the output of a job will be printed as soon as it
+completes. Sometimes you want the order of the output to remain the
+same as the order of the input. This is often important, if the output
+is used as input for another system. B<-k> will make sure the order of
+output will be in the same order as input even if later jobs end
+before earlier jobs.
+
+Append a string to every line in a text file:
+
+B<cat textfile | parallel -k echo {} append_string>
+
+If you remove B<-k> some of the lines may come out in the wrong order.
+
+Another example is B<traceroute>:
+
+B<parallel traceroute ::: foss.org.my debian.org freenetproject.org>
+
+will give traceroute of foss.org.my, debian.org and
+freenetproject.org, but it will be sorted according to which job
+completed first.
+
+To keep the order the same as input run:
+
+B<parallel -k traceroute ::: foss.org.my debian.org freenetproject.org>
+
+This will make sure the traceroute to foss.org.my will be printed
+first.
+
+
+=head1 EXAMPLE: Parallel grep
+
+B<grep -r> greps recursively through directories. On multicore CPUs
+GNU B<parallel> can often speed this up.
+
+B<find . -type f | parallel -k -j150% -n 1000 -m grep -H -n STRING {}>
+
+This will run 1.5 job per core, and give 1000 arguments to B<grep>.
+
+
+=head1 EXAMPLE: Using remote computers
+
+To run commands on a remote computer SSH needs to be set up and you
+must be able to login without entering a password (B<ssh-agent> may be
+handy).
+
+To run B<echo> on B<server.example.com>:
+
+ seq 1 10 | parallel --sshlogin server.example.com echo
+
+To run commands on more than one remote computer run:
+
+ seq 1 10 | parallel --sshlogin server.example.com,server2.example.net echo
+
+Or:
+
+ seq 1 10 | parallel --sshlogin server.example.com \
+ --sshlogin server2.example.net echo
+
+If the login username is I<foo> on I<server2.example.net> use:
+
+ seq 1 10 | parallel --sshlogin server.example.com \
+ --sshlogin [email protected] echo
+
+To distribute the commands to a list of computers, make a file
+I<mycomputers> with all the computers:
+
+ server.example.com
+ server3.example.com
+
+Then run:
+
+ seq 1 10 | parallel --sshloginfile mycomputers echo
+
+To include the local computer add the special sshlogin ':' to the list:
+
+ server.example.com
+ server3.example.com
+ :
+
+GNU B<parallel> will try to determine the number of CPU cores on each
+of the remote computers, so B<-j+0> will run one job per CPU core -
+even if the remote computers do not have the same number of CPU cores.
+
+If the number of CPU cores on the remote servers is not identified
+correctly the number of CPU cores can be added in front. Here the
+server has 8 CPU cores.
+
+ seq 1 10 | parallel --sshlogin 8/server.example.com echo
+
+
+=head1 EXAMPLE: Transferring of files
+
+To recompress gzipped files with B<bzip2> using a remote server run:
+
+ find logs/ -name '*.gz' | \
+ parallel --sshlogin server.example.com \
+ --transfer "zcat {} | bzip2 -9 >{.}.bz2"
+
+This will list the .gz-files in the I<logs> directory and all
+directories below. Then it will transfer the files to
+I<server.example.com> to the corresponding directory in
+I<$HOME/logs>. On I<server.example.com> the file will be recompressed
+using B<zcat> and B<bzip2> resulting in the corresponding file with
+I<.gz> replaced with I<.bz2>.
+
+If you want the resulting bz2-file to be transferred back to the local
+computer add I<--return {.}.bz2>:
+
+ find logs/ -name '*.gz' | \
+ parallel --sshlogin server.example.com \
+ --transfer --return {.}.bz2 "zcat {} | bzip2 -9 >{.}.bz2"
+
+After the recompressing is done the I<.bz2>-file is transferred back to
+the local computer and put next to the original I<.gz>-file.
+
+If you want to delete the transferred files on the remote computer add
+I<--cleanup>. This will remove both the file transferred to the remote
+computer and the files transferred from the remote computer:
+
+ find logs/ -name '*.gz' | \
+ parallel --sshlogin server.example.com \
+ --transfer --return {.}.bz2 --cleanup "zcat {} | bzip2 -9 >{.}.bz2"
+
+If you want to run on several servers add the servers to I<--sshlogin>
+either using ',' or multiple I<--sshlogin>:
+
+ find logs/ -name '*.gz' | \
+ parallel --sshlogin server.example.com,server2.example.com \
+ --sshlogin server3.example.com \
+ --transfer --return {.}.bz2 --cleanup "zcat {} | bzip2 -9 >{.}.bz2"
+
+You can add the local computer using I<--sshlogin :>. This will disable the
+removing and transferring for the local computer only:
+
+ find logs/ -name '*.gz' | \
+ parallel --sshlogin server.example.com,server2.example.com \
+ --sshlogin server3.example.com \
+ --sshlogin : \
+ --transfer --return {.}.bz2 --cleanup "zcat {} | bzip2 -9 >{.}.bz2"
+
+Often I<--transfer>, I<--return> and I<--cleanup> are used together. They can be
+shortened to I<--trc>:
+
+ find logs/ -name '*.gz' | \
+ parallel --sshlogin server.example.com,server2.example.com \
+ --sshlogin server3.example.com \
+ --sshlogin : \
+ --trc {.}.bz2 "zcat {} | bzip2 -9 >{.}.bz2"
+
+With the file I<mycomputers> containing the list of computers it becomes:
+
+ find logs/ -name '*.gz' | parallel --sshloginfile mycomputers \
+ --trc {.}.bz2 "zcat {} | bzip2 -9 >{.}.bz2"
+
+If the file I<~/.parallel/sshloginfile> contains the list of computers
+the special short hand I<-S ..> can be used:
+
+ find logs/ -name '*.gz' | parallel -S .. \
+ --trc {.}.bz2 "zcat {} | bzip2 -9 >{.}.bz2"
+
+=head1 EXAMPLE: Distributing work to local and remote computers
+
+Convert *.mp3 to *.ogg running one process per CPU core on local computer and server2:
+
+ parallel --trc {.}.ogg -j+0 -S server2,: \
+ 'mpg321 -w - {} | oggenc -q0 - -o {.}.ogg' ::: *.mp3
+
+=head1 EXAMPLE: Use multiple inputs in one command
+
+Copy files like foo.es.ext to foo.ext:
+
+B<ls *.es.* | perl -pe 'print; s/\.es//' | parallel -N2 cp {1} {2}>
+
+The perl command spits out 2 lines for each input. GNU B<parallel>
+takes 2 inputs (using B<-N2>) and replaces {1} and {2} with the inputs.
+
+Print the number on the opposing sides of a six sided die:
+
+B<parallel -a <(seq 6) -a <(seq 6 -1 1) echo>
+
+Convert files from all subdirs to PNG-files with consecutive numbers
+(useful for making input PNG's for B<ffmpeg>):
+
+B<parallel -a <(find . -type f | sort) -a <(seq 1 $(find . -type f|wc -l)) convert {1} {2}.png>
+
+Alternative version:
+
+B<find . -type f | sort | parallel convert {} \$PARALLEL_SEQ.png>
+
+
+=head1 EXAMPLE: Use a table as input
+
+Content of table_file.tsv:
+
+ foo<TAB>bar
+ baz <TAB> quux
+
+To run:
+
+ cmd -o bar -i foo
+ cmd -o quux -i baz
+
+you can run:
+
+B<parallel -a table_file.tsv --colsep '\t' cmd -o {2} -i {1}>
+
+Note: The default for GNU B<parallel> is to remove the spaces around the columns. To keep the spaces:
+
+B<parallel -a table_file.tsv --trim n --colsep '\t' cmd -o {2} -i {1}>
+
+
+=head1 EXAMPLE: Working as cat | sh. Resource inexpensive jobs and evaluation
+
+GNU B<parallel> can work similar to B<cat | sh>.
+
+A resource inexpensive job is a job that takes very little CPU, disk
+I/O and network I/O. Ping is an example of a resource inexpensive
+job. wget is too - if the webpages are small.
+
+The content of the file jobs_to_run:
+
+ ping -c 1 10.0.0.1
+ wget http://status-server/status.cgi?ip=10.0.0.1
+ ping -c 1 10.0.0.2
+ wget http://status-server/status.cgi?ip=10.0.0.2
+ ...
+ ping -c 1 10.0.0.255
+ wget http://status-server/status.cgi?ip=10.0.0.255
+
+To run 100 processes simultaneously do:
+
+B<parallel -j 100 < jobs_to_run>
+
+As there is no I<command> the option B<--command> is the default
+because the jobs need to be evaluated by the shell.
+
+
+=head1 EXAMPLE: Working as mutex and counting semaphore
+
+The command B<sem> is an alias for B<parallel --semaphore>.
+
+A counting semaphore will allow a given number of jobs to be started
+in the background. When that many jobs are running in the
+background, GNU B<sem> will wait for one of these to complete before
+starting another command. B<sem --wait> will wait for all jobs to
+complete.
+
+Run 10 jobs concurrently in the background:
+
+  for i in *.log ; do
+ echo $i
+ sem -j10 gzip $i ";" echo done
+ done
+ sem --wait
+
+A mutex is a counting semaphore allowing only one job to run. This
+will edit the file I<myfile> and prepend lines with the numbers 1 to 3
+to the file.
+
+ seq 1 3 | parallel sem sed -i -e 'i{}' myfile
+
+As I<myfile> can be very big it is important only one process edits
+the file at the same time.
+
+Name the semaphore to have multiple different semaphores active at the
+same time:
+
+ seq 1 3 | parallel sem --id mymutex sed -i -e 'i{}' myfile
+
+
+=head1 EXAMPLE: Start editor with filenames from stdin (standard input)
+
+You can use GNU Parallel to start interactive programs like emacs or vi:
+
+B<cat filelist | parallel -uXj1 emacs>
+
+B<cat filelist | parallel -uXj1 vi>
+
+If there are more files than will fit on a single command line, the
+editor will be started again with the remaining files.
+
+
+=head1 EXAMPLE: GNU Parallel as queue system/batch manager
+
+GNU Parallel can work as a simple job queue system or batch manager.
+The idea is to put the jobs into a file and have GNU Parallel read
+from that continuously. As GNU Parallel will stop at end of file we
+use tail to continue reading:
+
+B<echo >>B<jobqueue>; B<tail -f jobqueue | parallel>
+
+To submit your jobs to the queue:
+
+B<echo my_command my_arg >>>B< jobqueue>
+
+You can of course use B<-S> to distribute the jobs to remote
+computers:
+
+B<echo >>B<jobqueue>; B<tail -f jobqueue | parallel -S ..>
+
+
+=head1 EXAMPLE: GNU Parallel as dir processor
+
+If you have a dir in which users drop files that need to be processed
+you can do this on GNU/Linux (if you know what B<inotifywait> is
+called on other platforms, file a bug report):
+
+B<inotifywait -q -m -r -e CLOSE_WRITE --format %w%f my_dir | parallel
+-u echo>
+
+This will run the command B<echo> on each file put into B<my_dir> or
+subdirs of B<my_dir>.
+
+The B<-u> is needed because of a small bug in GNU B<parallel>. If that
+proves to be a problem, file a bug report.
+
+You can of course use B<-S> to distribute the jobs to remote
+computers:
+
+B<inotifywait -q -m -r -e CLOSE_WRITE --format %w%f my_dir | parallel -S ..
+-u echo>
+
+
+=head1 QUOTING
+
+For more advanced use quoting may be an issue. The following will
+print the filename for each line that has exactly 2 columns:
+
+B<perl -ne '/^\S+\s+\S+$/ and print $ARGV,"\n"' file>
+
+This can be done by GNU B<parallel> using:
+
+B<ls | parallel "perl -ne '/^\\S+\\s+\\S+$/ and print \$ARGV,\"\\n\"'">
+
+Notice how \'s, "'s, and $'s need to be quoted. GNU B<parallel> can do
+the quoting by using option B<-q>:
+
+B<ls | parallel -q perl -ne '/^\S+\s+\S+$/ and print $ARGV,"\n"'>
+
+However, this means you cannot make the shell interpret special
+characters. For example this B<will not work>:
+
+B<ls *.gz | parallel -q "zcat {} >>B<{.}">
+
+B<ls *.gz | parallel -q "zcat {} | bzip2 >>B<{.}.bz2">
+
+because > and | need to be interpreted by the shell.
+
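+Without B<-q> the same commands do work, because the quoted command is
+passed to the shell, which then interprets the redirection and the pipe
+(a sketch following the earlier compression examples):
+
+  ls *.gz | parallel "zcat {} >{.}"
+  ls *.gz | parallel "zcat {} | bzip2 >{.}.bz2"
+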
+If you get errors like:
+
+ sh: -c: line 0: syntax error near unexpected token
+ sh: Syntax error: Unterminated quoted string
+ sh: -c: line 0: unexpected EOF while looking for matching `''
+ sh: -c: line 1: syntax error: unexpected end of file
+
+then you might try using B<-q>.
+
+If you are using B<bash> process substitution like B<<(cat foo)> then
+you may try B<-q> and prepend I<command> with B<bash -c>:
+
+B<ls | parallel -q bash -c 'wc -c <(echo {})'>
+
+Or for substituting output:
+
+B<ls | parallel -q bash -c 'tar c {} | tee >>B<(gzip >>B<{}.tar.gz) | bzip2 >>B<{}.tar.bz2'>
+
+B<Conclusion>: To avoid dealing with the quoting problems it may be
+easier just to write a small script and have GNU B<parallel> call that
+script.
+
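+A sketch of that approach (B<recompress> is a hypothetical helper
+script, not part of GNU B<parallel>):
+
+  #!/bin/sh
+  # recompress: decompress $1 and recompress it as bzip2
+  zcat "$1" | bzip2 -9 > "${1%.gz}.bz2"
+
+After B<chmod +x recompress> it can be called without any quoting issues:
+
+  ls *.gz | parallel ./recompress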
+
+=head1 LIST RUNNING JOBS
+
+If you want a list of the jobs currently running you can run:
+
+B<killall -USR1 parallel>
+
+GNU B<parallel> will then print the currently running jobs on STDERR.
+
+
+=head1 COMPLETE RUNNING JOBS BUT DO NOT START NEW JOBS
+
+If you regret starting a lot of jobs you can simply break GNU B<parallel>,
+but if you want to make sure you do not have half-completed jobs you
+should send the signal B<SIGTERM> to GNU B<parallel>:
+
+B<killall -TERM parallel>
+
+This will tell GNU B<parallel> to not start any new jobs, but wait until
+the currently running jobs are finished before exiting.
+
+
+=head1 ENVIRONMENT VARIABLES
+
+=over 9
+
+=item $PARALLEL_PID
+
+The environment variable $PARALLEL_PID is set by GNU B<parallel> and
+is visible to the jobs started from GNU B<parallel>. This makes it
+possible for the jobs to communicate directly to GNU B<parallel>.
+Remember to quote the $, so it gets evaluated by the correct
+shell.
+
+B<Example:> If each of the jobs tests a solution and one of the jobs finds
+the solution the job can tell GNU B<parallel> not to start more jobs
+by: B<kill -TERM $PARALLEL_PID>. This only works on the local
+computer.
+
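+A sketch of that pattern (B<is_solution> is a hypothetical test
+command, not part of GNU B<parallel>; the single quotes make sure the
+$ is evaluated by the shell started by GNU B<parallel>):
+
+  seq 1 1000 | parallel './is_solution {} && kill -TERM $PARALLEL_PID'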
+
+=item $PARALLEL_SEQ
+
+$PARALLEL_SEQ will be set to the sequence number of the job
+running. Remember to quote the $, so it gets evaluated by the correct
+shell.
+
+B<Example:>
+
+B<seq 1 10 | parallel -N2 echo seq:'$'PARALLEL_SEQ arg1:{1} arg2:{2}>
+
+
+=item $PARALLEL
+
+The environment variable $PARALLEL will be used as default options for
+GNU B<parallel>. If the variable contains special shell characters
+(e.g. $, *, or space) then these need to be escaped with \.
+
+B<Example:>
+
+B<cat list | parallel -j1 -k -v ls>
+
+can be written as:
+
+B<cat list | PARALLEL="-kvj1" parallel ls>
+
+B<cat list | parallel -j1 -k -v -S"myssh user@server.example.com" ls>
+
+can be written as:
+
+B<cat list | PARALLEL='-kvj1 -S myssh\ user@server.example.com' parallel echo>
+
+Notice the \ in the middle is needed because 'myssh' and 'user@server.example.com'
+must be one argument.
+
+=back
+
+=head1 DEFAULT PROFILE (CONFIG FILE)
+
+The file ~/.parallel/config (formerly known as .parallelrc) will be
+read if it exists. Lines starting with '#' will be ignored. It can be
+formatted like the environment variable $PARALLEL, but it is often
+easier to simply put each option on its own line.
+
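+For instance (a sketch of a possible configuration, not a shipped
+default) the file could contain:
+
+  # Run one job per CPU core and keep the output order
+  -j+0
+  -k
+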
+Options on the command line take precedence over the environment
+variable $PARALLEL which takes precedence over the file
+~/.parallel/config.
+
+=head1 PROFILE FILES
+
+If B<--profile> is set, GNU B<parallel> will read the profile from that file instead of
+~/.parallel/config.
+
+Example: Profile for running every command with B<-j+0> and B<nice>
+
+ echo -j+0 nice > ~/.parallel/nice_profile
+ parallel -J nice_profile bzip2 -9 ::: *
+
+Example: Profile for running a perl script before every command:
+
+ echo "perl -e '\$a=\$\$; print \$a,\" \",'\$PARALLEL_SEQ',\" \";';" > ~/.parallel/pre_perl
+ parallel -J pre_perl echo ::: *
+
+Note how the $ and " need to be quoted using \.
+
+Example: Profile for running distributed jobs with B<nice> on the
+remote machines:
+
+ echo -S .. nice > ~/.parallel/dist
+ parallel -J dist --trc {.}.bz2 bzip2 -9 ::: *
+
+
+=head1 EXIT STATUS
+
+If B<--halt-on-error> 0 or not specified:
+
+=over 6
+
+=item 0
+
+All jobs ran without error.
+
+=item 1-253
+
+Some of the jobs failed. The exit status gives the number of failed jobs.
+
+=item 254
+
+More than 253 jobs failed.
+
+=item 255
+
+Other error.
+
+=back
+
+If B<--halt-on-error> 1 or 2: Exit status of the failing job.
+
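+As an illustration of the default behaviour (a sketch; B<--halt-on-error>
+is not used):
+
+  parallel false ::: 1 2 3
+  echo $?   # prints 3 because all three jobs failed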
+
+=head1 DIFFERENCES BETWEEN GNU Parallel AND ALTERNATIVES
+
+There are a lot of programs with some of the functionality of GNU
+B<parallel>. GNU B<parallel> strives to include the best of that
+functionality without sacrificing ease of use.
+
+=head2 SUMMARY TABLE
+
+The following features are in some of the comparable tools:
+
+Inputs
+ I1. Arguments can be read from stdin
+ I2. Arguments can be read from a file
+ I3. Arguments can be read from multiple files
+ I4. Arguments can be read from command line
+ I5. Arguments can be read from a table
+ I6. Arguments can be read from the same file using #! (shebang)
+ I7. Line oriented input as default (Quoting of special chars not needed)
+
+Manipulation of input
+ M1. Composed command
+ M2. Multiple arguments can fill up an execution line
+ M3. Arguments can be put anywhere in the execution line
+ M4. Multiple arguments can be put anywhere in the execution line
+ M5. Arguments can be replaced with context
+ M6. Input can be treated as complete execution line
+
+Outputs
+ O1. Grouping output so output from different jobs do not mix
+ O2. Send stderr to stderr
+ O3. Send stdout to stdout
+ O4. Order of output can be same as order of input
+ O5. Stdout only contains stdout from the command
+ O6. Stderr only contains stdout from the command
+
+Execution
+ E1. Running jobs in parallel
+ E2. List running jobs
+ E3. Finish running jobs, but do not start new jobs
+ E4. Number of running jobs can depend on number of cpus
+ E5. Finish running jobs, but do not start new jobs after first failure
+ E6. Number of running jobs can be adjusted while running
+
+Remote execution
+ R1. Jobs can be run on remote computers
+ R2. Basefiles can be transferred
+ R3. Argument files can be transferred
+ R4. Result files can be transferred
+ R5. Cleanup of transferred files
+ R6. No config files needed
+ R7. Do not run more than SSHD's MaxStartup can handle
+ R8. Configurable SSH command
+ R9. Retry if connection breaks occasionally
+
+Semaphore
+ S1. Possibility to work as a mutex
+ S2. Possibility to work as a counting semaphore
+
+Legend
+ - = no
+ x = not applicable
+ ID = yes
+
+As not every new version of the programs is tested, the table may be
+outdated. Please file a bug-report if you find errors (See REPORTING
+BUGS).
+
+parallel:
+I1 I2 I3 I4 I5 I6 I7
+M1 M2 M3 M4 M5 M6
+O1 O2 O3 O4 O5 O6
+E1 E2 E3 E4 E5 E6
+R1 R2 R3 R4 R5 R6 R7 R8 R9
+S1 S2
+
+xargs:
+I1 I2 - - - - -
+- M2 M3 - - -
+- O2 O3 - O5 O6
+E1 - - - - -
+- - - - - x - - -
+- -
+
+find -exec:
+- - - x - x -
+- M2 M3 - - -
+- O2 O3 O4 O5 O6
+- - - - - -
+- - - - - - - - -
+x x
+
+make -j:
+- - - - - - -
+- - - - - -
+O1 O2 O3 - x O6
+E1 - - - E5 -
+- - - - - - - - -
+- -
+
+ppss:
+I1 I2 - - - - I7
+M1 - M3 - - M6
+O1 - - x - -
+E1 E2 ?E3 E4 - -
+R1 R2 R3 R4 - - ?R7 ? ?
+- -
+
+pexec:
+I1 I2 - I4 I5 - -
+M1 - M3 - - M6
+O1 O2 O3 - O5 O6
+E1 - - E4 - E6
+R1 - - - - R6 - - -
+S1 -
+
+xjobs: TODO - Please file a bug-report if you know what features xjobs
+supports (See REPORTING BUGS).
+
+prll: TODO - Please file a bug-report if you know what features prll
+supports (See REPORTING BUGS).
+
+dxargs: TODO - Please file a bug-report if you know what features dxargs
+supports (See REPORTING BUGS).
+
+mdm/middelman: TODO - Please file a bug-report if you know what
+features mdm/middelman supports (See REPORTING BUGS).
+
+xapply: TODO - Please file a bug-report if you know what features xapply
+supports (See REPORTING BUGS).
+
+paexec: TODO - Please file a bug-report if you know what features paexec
+supports (See REPORTING BUGS).
+
+ClusterSSH: TODO - Please file a bug-report if you know what features ClusterSSH
+supports (See REPORTING BUGS).
+
+
+=head2 DIFFERENCES BETWEEN xargs AND GNU Parallel
+
+B<xargs> offers some of the same possibilities as GNU B<parallel>.
+
+B<xargs> deals badly with special characters (such as space, ' and
+"). To see the problem try this:
+
+ touch important_file
+ touch 'not important_file'
+ ls not* | xargs rm
+ mkdir -p '12" records'
+ ls | xargs rmdir
+
+You can specify B<-0> or B<-d "\n">, but many input generators are not
+optimized for using B<NUL> as separator but are optimized for
+B<newline> as separator. E.g. B<head>, B<tail>, B<awk>, B<ls>, B<echo>,
+B<sed>, B<tar -v>, B<perl> (B<-0> and \0 instead of \n), B<locate>
+(requires using B<-0>), B<find> (requires using B<-print0>), B<grep>
+(requires user to use B<-z> or B<-Z>), B<sort> (requires using B<-z>).
+
+So GNU B<parallel>'s newline separation can be emulated with:
+
+B<cat | xargs -d "\n" -n1 I<command>>
+
+B<xargs> can run a given number of jobs in parallel, but has no
+support for running number-of-cpu-cores jobs in parallel.
+
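+For example (a sketch; I<mycommand> is a placeholder for whatever you
+want to run):
+
+  cat list | xargs -d "\n" -n1 -P4 mycommand   # always 4 jobs
+  cat list | parallel -j+0 mycommand           # one job per CPU core
+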
+B<xargs> has no support for grouping the output, therefore output may
+run together, e.g. the first half of a line is from one process and
+the last half of the line is from another process. The example
+B<Parallel grep> cannot be done reliably with B<xargs> because of
+this. To see this in action try:
+
+ parallel perl -e '\$a=\"1{}\"x10000000\;print\ \$a,\"\\n\"' '>' {} ::: a b c d e f
+ ls -l a b c d e f
+ parallel -kP4 -n1 grep 1 > out.par ::: a b c d e f
+ echo a b c d e f | xargs -P4 -n1 grep 1 > out.xargs-unbuf
+ echo a b c d e f | xargs -P4 -n1 grep --line-buffered 1 > out.xargs-linebuf
+ echo a b c d e f | xargs -n1 grep --line-buffered 1 > out.xargs-serial
+ ls -l out*
+ md5sum out*
+
+B<xargs> has no support for keeping the order of the output, therefore
+if running jobs in parallel using B<xargs> the output of the second
+job cannot be postponed till the first job is done.
+
+B<xargs> has no support for running jobs on remote computers.
+
+B<xargs> has no support for context replace, so you will have to create the
+arguments.
+
+If you use a replace string in B<xargs> (B<-I>) you can not force
+B<xargs> to use more than one argument.
+
+Quoting in B<xargs> works like B<-q> in GNU B<parallel>. This means
+composed commands and redirection require using B<bash -c>.
+
+B<ls | parallel "wc {} >> B<{}.wc">
+
+becomes
+
+B<ls | xargs -d "\n" -P9 -I {} bash -c "wc {} >>B< {}.wc">
+
+and
+
+B<ls | parallel "echo {}; ls {}|wc">
+
+becomes
+
+B<ls | xargs -d "\n" -P9 -I {} bash -c "echo {}; ls {}|wc">
+
+
+=head2 DIFFERENCES BETWEEN find -exec AND GNU Parallel
+
+B<find -exec> offers some of the same possibilities as GNU B<parallel>.
+
+B<find -exec> only works on files. So processing other input (such as
+hosts or URLs) will require creating these inputs as files. B<find
+-exec> has no support for running commands in parallel.
+
+
+=head2 DIFFERENCES BETWEEN make -j AND GNU Parallel
+
+B<make -j> can run jobs in parallel, but requires a crafted Makefile
+to do this. That results in extra quoting to get filenames containing
+newlines to work correctly.
+
+B<make -j> has no support for grouping the output, therefore output
+may run together, e.g. the first half of a line is from one process
+and the last half of the line is from another process. The example
+B<Parallel grep> cannot be done reliably with B<make -j> because of
+this.
+
+(Very early versions of GNU B<parallel> were coincidentally implemented
+using B<make -j>).
+
+
+=head2 DIFFERENCES BETWEEN ppss AND GNU Parallel
+
+B<ppss> is also a tool for running jobs in parallel.
+
+The output of B<ppss> is status information and thus not useful for
+using as input for another command. The output from the jobs is put
+into files.
+
+The argument replace string ($ITEM) cannot be changed. Arguments must
+be quoted - thus arguments containing special characters (space '"&!*)
+may cause problems. More than one argument is not supported. File
+names containing newlines are not processed correctly. When reading
+input from a file, null cannot be used as a terminator. B<ppss> needs to
+read the whole input file before starting any jobs.
+
+Output and status information is stored in ppss_dir and thus requires
+cleanup when completed. If the dir is not removed before running
+B<ppss> again it may cause nothing to happen as B<ppss> thinks the
+task is already done. GNU B<parallel> will normally not need cleaning
+up if running locally and will only need cleaning up if stopped
+abnormally while running remotely (B<--cleanup> may not complete if
+stopped abnormally). The example B<Parallel grep> would require extra
+postprocessing if written using B<ppss>.
+
+For remote systems PPSS requires 3 steps: config, deploy, and
+start. GNU B<parallel> only requires one step.
+
+=head3 EXAMPLES FROM ppss MANUAL
+
+Here are the examples from B<ppss>'s manual page with the equivalent
+using GNU B<parallel>:
+
+B<1> ./ppss.sh standalone -d /path/to/files -c 'gzip '
+
+B<1> find /path/to/files -type f | parallel -j+0 gzip
+
+B<2> ./ppss.sh standalone -d /path/to/files -c 'cp "$ITEM" /destination/dir '
+
+B<2> find /path/to/files -type f | parallel -j+0 cp {} /destination/dir
+
+B<3> ./ppss.sh standalone -f list-of-urls.txt -c 'wget -q '
+
+B<3> parallel -a list-of-urls.txt wget -q
+
+B<4> ./ppss.sh standalone -f list-of-urls.txt -c 'wget -q "$ITEM"'
+
+B<4> parallel -a list-of-urls.txt wget -q {}
+
+B<5> ./ppss config -C config.cfg -c 'encode.sh ' -d /source/dir -m
+192.168.1.100 -u ppss -k ppss-key.key -S ./encode.sh -n nodes.txt -o
+/some/output/dir --upload --download ; ./ppss deploy -C config.cfg ;
+./ppss start -C config
+
+B<5> # parallel does not use configs. If you want a different username put it in nodes.txt: user@hostname
+
+B<5> find source/dir -type f | parallel --sshloginfile nodes.txt --trc {.}.mp3 lame -a {} -o {.}.mp3 --preset standard --quiet
+
+B<6> ./ppss stop -C config.cfg
+
+B<6> killall -TERM parallel
+
+B<7> ./ppss pause -C config.cfg
+
+B<7> Press: CTRL-Z or killall -SIGTSTP parallel
+
+B<8> ./ppss continue -C config.cfg
+
+B<8> Enter: fg or killall -SIGCONT parallel
+
+B<9> ./ppss.sh status -C config.cfg
+
+B<9> killall -SIGUSR2 parallel
+
+
+=head2 DIFFERENCES BETWEEN pexec AND GNU Parallel
+
+B<pexec> is also a tool for running jobs in parallel.
+
+Here are the examples from B<pexec>'s info page with the equivalent
+using GNU B<parallel>:
+
+B<1> pexec -o sqrt-%s.dat -p "$(seq 10)" -e NUM -n 4 -c -- \
+ 'echo "scale=10000;sqrt($NUM)" | bc'
+
+B<1> seq 10 | parallel -j4 'echo "scale=10000;sqrt({})" | bc > sqrt-{}.dat'
+
+B<2> pexec -p "$(ls myfiles*.ext)" -i %s -o %s.sort -- sort
+
+B<2> ls myfiles*.ext | parallel sort {} ">{}.sort"
+
+B<3> pexec -f image.list -n auto -e B -u star.log -c -- \
+ 'fistar $B.fits -f 100 -F id,x,y,flux -o $B.star'
+
+B<3> parallel -a image.list -j+0 \
+ 'fistar {}.fits -f 100 -F id,x,y,flux -o {}.star' 2>star.log
+
+B<4> pexec -r *.png -e IMG -c -o - -- \
+ 'convert $IMG ${IMG%.png}.jpeg ; "echo $IMG: done"'
+
+B<4> ls *.png | parallel 'convert {} {.}.jpeg; echo {}: done'
+
+B<5> pexec -r *.png -i %s -o %s.jpg -c 'pngtopnm | pnmtojpeg'
+
+B<5> ls *.png | parallel 'pngtopnm < {} | pnmtojpeg > {}.jpg'
+
+B<6> for p in *.png ; do echo ${p%.png} ; done | \
+ pexec -f - -i %s.png -o %s.jpg -c 'pngtopnm | pnmtojpeg'
+
+B<6> ls *.png | parallel 'pngtopnm < {} | pnmtojpeg > {.}.jpg'
+
+B<7> LIST=$(for p in *.png ; do echo ${p%.png} ; done)
+ pexec -r $LIST -i %s.png -o %s.jpg -c 'pngtopnm | pnmtojpeg'
+
+B<7> ls *.png | parallel 'pngtopnm < {} | pnmtojpeg > {.}.jpg'
+
+B<8> pexec -n 8 -r *.jpg -y unix -e IMG -c \
+ 'pexec -j -m blockread -d $IMG | \
+ jpegtopnm | pnmscale 0.5 | pnmtojpeg | \
+ pexec -j -m blockwrite -s th_$IMG'
+
+B<8> Combining GNU B<parallel> and GNU B<sem>.
+
+B<8> ls *jpg | parallel -j8 'sem --id blockread cat {} | jpegtopnm |' \
+ 'pnmscale 0.5 | pnmtojpeg | sem --id blockwrite cat > th_{}'
+
+B<8> If reading and writing is done to the same disk, this may be
+faster as only one process will be either reading or writing:
+
+B<8> ls *jpg | parallel -j8 'sem --id diskio cat {} | jpegtopnm |' \
+ 'pnmscale 0.5 | pnmtojpeg | sem --id diskio cat > th_{}'
+
+=head2 DIFFERENCES BETWEEN xjobs AND GNU Parallel
+
+B<xjobs> is also a tool for running jobs in parallel. It only supports
+running jobs on your local computer.
+
+B<xjobs> deals badly with special characters just like B<xargs>. See
+the section B<DIFFERENCES BETWEEN xargs AND GNU Parallel>.
+
+Here are the examples from B<xjobs>'s man page with the equivalent
+using GNU B<parallel>:
+
+B<1> ls -1 *.zip | xjobs unzip
+
+B<1> ls *.zip | parallel unzip
+
+B<2> ls -1 *.zip | xjobs -n unzip
+
+B<2> ls *.zip | parallel unzip >/dev/null
+
+B<3> find . -name '*.bak' | xjobs gzip
+
+B<3> find . -name '*.bak' | parallel gzip
+
+B<4> ls -1 *.jar | sed 's/\(.*\)/\1 > \1.idx/' | xjobs jar tf
+
+B<4> ls *.jar | parallel jar tf {} '>' {}.idx
+
+B<5> xjobs -s script
+
+B<5> cat script | parallel
+
+B<6> mkfifo /var/run/my_named_pipe;
+xjobs -s /var/run/my_named_pipe &
+echo unzip 1.zip >> /var/run/my_named_pipe;
+echo tar cf /backup/myhome.tar /home/me >> /var/run/my_named_pipe
+
+B<6> mkfifo /var/run/my_named_pipe;
+cat /var/run/my_named_pipe | parallel &
+echo unzip 1.zip >> /var/run/my_named_pipe;
+echo tar cf /backup/myhome.tar /home/me >> /var/run/my_named_pipe
+
+
+=head2 DIFFERENCES BETWEEN prll AND GNU Parallel
+
+B<prll> is also a tool for running jobs in parallel. It does not
+support running jobs on remote computers.
+
+B<prll> encourages using BASH aliases and BASH functions instead of
+scripts. GNU B<parallel> can use the aliases and functions that are
+defined at login (using: B<parallel bash -ci myalias>) but it will
+never support running aliases and functions that are defined
+later (see why
+http://www.perlmonks.org/index.pl?node_id=484296). However, scripts or
+composed commands work just fine.
+
+B<prll> generates a lot of status information on STDERR which makes it
+harder to use the STDERR output of the job directly as input for
+another program.
+
+Here is the example from B<prll>'s man page with the equivalent
+using GNU B<parallel>:
+
+prll -s 'mogrify -flip $1' *.jpg
+
+parallel mogrify -flip ::: *.jpg
+
+
+=head2 DIFFERENCES BETWEEN dxargs AND GNU Parallel
+
+B<dxargs> is also a tool for running jobs in parallel.
+
+B<dxargs> does not deal well with more simultaneous jobs than SSHD's
+MaxStartup. B<dxargs> is built only for running jobs remotely, but does not
+support transferring files.
+
+
+=head2 DIFFERENCES BETWEEN mdm/middleman AND GNU Parallel
+
+middleman (mdm) is also a tool for running jobs in parallel.
+
+Here are the shellscripts of http://mdm.berlios.de/usage.html ported
+to GNU B<parallel>:
+
+B<seq 1 19 | parallel -j+0 buffon -o - | sort -n >>B< result>
+
+B<cat files | parallel -j+0 cmd>
+
+B<find dir -execdir sem -j+0 cmd {} \;>
+
+=head2 DIFFERENCES BETWEEN xapply AND GNU Parallel
+
+B<xapply> can run jobs in parallel on the local computer.
+
+Here are the examples from B<xapply>'s man page with the equivalent
+using GNU B<parallel>:
+
+B<1> xapply '(cd %1 && make all)' */
+
+B<1> parallel 'cd {} && make all' ::: */
+
+B<2> xapply -f 'diff %1 ../version5/%1' manifest | more
+
+B<2> parallel diff {} ../version5/{} < manifest | more
+
+B<3> xapply -p/dev/null -f 'diff %1 %2' manifest1 checklist1
+
+B<3> parallel diff {1} {2} :::: manifest1 checklist1
+
+B<4> xapply 'indent' *.c
+
+B<4> parallel indent ::: *.c
+
+B<5> find ~ksb/bin -type f ! -perm -111 -print | xapply -f -v 'chmod a+x' -
+
+B<5> find ~ksb/bin -type f ! -perm -111 -print | parallel -v chmod a+x
+
+B<6> find */ -... | fmt 960 1024 | xapply -f -i /dev/tty 'vi' -
+
+B<6> sh <(find */ -... | parallel -s 1024 echo vi)
+
+B<6> find */ -... | parallel -s 1024 -Xuj1 vi
+
+B<7> find ... | xapply -f -5 -i /dev/tty 'vi' - - - - -
+
+B<7> sh <(find ... |parallel -n5 echo vi)
+
+B<7> find ... |parallel -n5 -uj1 vi
+
+B<8> xapply -fn "" /etc/passwd
+
+B<8> parallel -k echo < /etc/passwd
+
+B<9> tr ':' '\012' < /etc/passwd | xapply -7 -nf 'chown %1 %6' - - - - - - -
+
+B<9> tr ':' '\012' < /etc/passwd | parallel -N7 chown {1} {6}
+
+B<10> xapply '[ -d %1/RCS ] || echo %1' */
+
+B<10> parallel '[ -d {}/RCS ] || echo {}' ::: */
+
+B<11> xapply -f '[ -f %1 ] && echo %1' List | ...
+
+B<11> parallel '[ -f {} ] && echo {}' < List | ...
+
+
+=head2 DIFFERENCES BETWEEN paexec AND GNU Parallel
+
+B<paexec> can run jobs in parallel on both the local and remote computers.
+
+B<paexec> requires commands to print a blank line as the last
+output. This means you will have to write a wrapper for most programs.
+
+B<paexec> has a job dependency facility so a job can depend on another
+job to be executed successfully. Sort of a poor-man's B<make>.
+
+Here are the examples from B<paexec>'s example catalog with the equivalent
+using GNU B<parallel>:
+
+=over 1
+
+=item 1_div_X_run:
+
+ ../../paexec -s -l -c "`pwd`/1_div_X_cmd" -n +1 <<EOF [...]
+ parallel echo {} '|' `pwd`/1_div_X_cmd <<EOF [...]
+
+=item all_substr_run:
+
+ ../../paexec -lp -c "`pwd`/all_substr_cmd" -n +3 <<EOF [...]
+ parallel echo {} '|' `pwd`/all_substr_cmd <<EOF [...]
+
+=item cc_wrapper_run:
+
+ ../../paexec -c "env CC=gcc CFLAGS=-O2 `pwd`/cc_wrapper_cmd" \
+ -n 'host1 host2' \
+ -t '/usr/bin/ssh -x' <<EOF [...]
+ parallel echo {} '|' "env CC=gcc CFLAGS=-O2 `pwd`/cc_wrapper_cmd" \
+ -S host1,host2 <<EOF [...]
+ # This is not exactly the same, but avoids the wrapper
+ parallel gcc -O2 -c -o {.}.o {} \
+ -S host1,host2 <<EOF [...]
+
+=item toupper_run:
+
+ ../../paexec -lp -c "`pwd`/toupper_cmd" -n +10 <<EOF [...]
+ parallel echo {} '|' ./toupper_cmd <<EOF [...]
+ # Without the wrapper:
+ parallel echo {} '| awk {print\ toupper\(\$0\)}' <<EOF [...]
+
+=back
+
+=head2 DIFFERENCES BETWEEN ClusterSSH AND GNU Parallel
+
+ClusterSSH solves a different problem than GNU B<parallel>.
+
+ClusterSSH runs the same command with the same arguments on a list of
+machines - one per machine. This is typically used for administrating
+several machines that are almost identical.
+
+GNU B<parallel> runs the same (or different) commands with different
+arguments in parallel possibly using remote machines to help
+computing. If more than one machine is listed in B<-S> GNU B<parallel> may
+only use one of these (e.g. if there are 8 jobs to be run and one
+machine has 8 cores).
+
+GNU B<parallel> can be used as a poor-man's version of ClusterSSH:
+
+B<cat hostlist | parallel ssh {} do_stuff>
+
+
+=head1 BUGS
+
+=head2 Quoting of newline
+
+Because of the way newline is quoted this will not work:
+
+echo 1,2,3 | parallel -vkd, "echo 'a{}'"
+
+However, this will work:
+
+echo 1,2,3 | parallel -vkd, echo a{}
+
+=head2 Startup speed
+
+GNU B<parallel> is slow at starting up. Half of the startup time on
+the local computer is spent finding the maximal length of a command
+line. Setting B<-s> will remove this part of the startup time.
+
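+A sketch of that (60000 is just an arbitrary, conservative limit and
+not a recommended value):
+
+  seq 1000 | parallel -s 60000 -X echo >/dev/null
+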
+When using multiple computers GNU B<parallel> opens B<ssh> connections
+to them to figure out how many connections can be used reliably
+simultaneously (namely SSHD's MaxStartup). This test is done for each
+host serially, so if your --sshloginfile contains many hosts it may
+be slow.
+
+
+=head1 REPORTING BUGS
+
+Report bugs to <bug-parallel@gnu.org>.
+
+
+=head1 AUTHOR
+
+Copyright (C) 2007-10-18 Ole Tange, http://ole.tange.dk
+
+Copyright (C) 2008,2009,2010 Ole Tange, http://ole.tange.dk
+
+Copyright (C) 2010 Ole Tange, http://ole.tange.dk and Free Software
+Foundation, Inc.
+
+Parts of the manual concerning B<xargs> compatibility are inspired by
+the manual of B<xargs> from GNU findutils 4.4.2.
+
+
+
+=head1 LICENSE
+
+Copyright (C) 2007,2008,2009,2010 Free Software Foundation, Inc.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3 of the License, or
+at your option any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+=head2 Documentation license I
+
+Permission is granted to copy, distribute and/or modify this documentation
+under the terms of the GNU Free Documentation License, Version 1.3 or
+any later version published by the Free Software Foundation; with no
+Invariant Sections, with no Front-Cover Texts, and with no Back-Cover
+Texts. A copy of the license is included in the file fdl.txt.
+
+=head2 Documentation license II
+
+You are free:
+
+=over 9
+
+=item B<to Share>
+
+to copy, distribute and transmit the work
+
+=item B<to Remix>
+
+to adapt the work
+
+=back
+
+Under the following conditions:
+
+=over 9
+
+=item B<Attribution>
+
+You must attribute the work in the manner specified by the author or
+licensor (but not in any way that suggests that they endorse you or
+your use of the work).
+
+=item B<Share Alike>
+
+If you alter, transform, or build upon this work, you may distribute
+the resulting work only under the same, similar or a compatible
+license.
+
+=back
+
+With the understanding that:
+
+=over 9
+
+=item B<Waiver>
+
+Any of the above conditions can be waived if you get permission from
+the copyright holder.
+
+=item B<Public Domain>
+
+Where the work or any of its elements is in the public domain under
+applicable law, that status is in no way affected by the license.
+
+=item B<Other Rights>
+
+In no way are any of the following rights affected by the license:
+
+=over 2
+
+=item *
+
+Your fair dealing or fair use rights, or other applicable
+copyright exceptions and limitations;
+
+=item *
+
+The author's moral rights;
+
+=item *
+
+Rights other persons may have either in the work itself or in
+how the work is used, such as publicity or privacy rights.
+
+=back
+
+=back
+
+=over 9
+
+=item B<Notice>
+
+For any reuse or distribution, you must make clear to others the
+license terms of this work.
+
+=back
+
+A copy of the full license is included in the file as cc-by-sa.txt.
+
+=head1 DEPENDENCIES
+
+GNU B<parallel> uses Perl, and the Perl modules Getopt::Long,
+IPC::Open3, Symbol, IO::File, POSIX, and File::Temp. For remote usage
+it also uses B<rsync> with B<ssh>.
+
+
+=head1 SEE ALSO
+
+B<find>(1), B<xargs>(1), B<make>(1), B<pexec>(1), B<ppss>(1),
+B<xjobs>(1), B<prll>(1), B<dxargs>(1), B<mdm>(1)
+
+=cut
+
+
+use IPC::Open3;
+use Symbol qw(gensym);
+use IO::File;
+use POSIX qw(:sys_wait_h setsid);
+use File::Temp qw(tempfile tempdir);
+use Getopt::Long;
+use strict;
+
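+# Main flow: parse the options, initialise the job queue, optionally act
+# as a semaphore, run the jobs and exit with the collected exit status.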
+do_not_reap();
+parse_options();
+init_run_jobs();
+my $sem;
+if($Global::semaphore) {
+ $sem = acquire_semaphore();
+}
+start_more_jobs();
+reap_if_needed();
+drain_job_queue();
+cleanup();
+if($Global::semaphore) {
+ $sem->release();
+}
+if($::opt_halt_on_error) {
+ wait_and_exit($Global::halt_on_error_exitstatus);
+} else {
+ wait_and_exit(min(undef_as_zero($Global::exitstatus),254));
+}
+
+sub acquire_semaphore {
+ # Acquires semaphore. If needed: spawns to the background
+ # Returns:
+ # The semaphore to be released when jobs is complete
+ my $sem = Semaphore->new($Semaphore::name,$Global::host{':'}{'max_no_of_running'});
+ $sem->acquire();
+ debug("run");
+ if($Semaphore::fg) {
+ # skip
+ } else {
+ # If run in the background, the PID will change
+ # therefore release and re-acquire the semaphore
+ $sem->release();
+ if(fork()) {
+ exit(0);
+ } else {
+ # child
+ # Get a semaphore for this pid
+ die "Can't start a new session: $!" if setsid() == -1;
+ $sem = Semaphore->new($Semaphore::name,$Global::host{':'}{'max_no_of_running'});
+ $sem->acquire();
+ }
+ }
+ return $sem;
+}
+
+sub get_options_from_array {
+ # Run GetOptions on @array
+ # Returns:
+ # true if parsing worked
+ # false if parsing failed
+ # @array is changed
+ my $array_ref = shift;
+ # A bit of shuffling of @ARGV needed as GetOptionsFromArray is not
+ # supported everywhere
+ my @save_argv;
+ my $this_is_ARGV = (\@::ARGV == $array_ref);
+ if(not $this_is_ARGV) {
+ @save_argv = @::ARGV;
+ @::ARGV = @{$array_ref};
+ }
+ my @retval = GetOptions
+ ("debug|D" => \$::opt_D,
+ "xargs|m" => \$::opt_m,
+ "X" => \$::opt_X,
+ "v" => \@::opt_v,
+ "silent" => \$::opt_silent,
+ "keep-order|keeporder|k" => \$::opt_k,
+ "group|g" => \$::opt_g,
+ "ungroup|u" => \$::opt_u,
+ "command|c" => \$::opt_c,
+ "file|f" => \$::opt_f,
+ "null|0" => \$::opt_0,
+ "quote|q" => \$::opt_q,
+ "I=s" => \$::opt_I,
+ "extensionreplace|U=s" => \$::opt_U,
+ "jobs|j=s" => \$::opt_P,
+ "max-line-length-allowed" => \$::opt_max_line_length_allowed,
+ "number-of-cpus" => \$::opt_number_of_cpus,
+ "number-of-cores" => \$::opt_number_of_cores,
+ "use-cpus-instead-of-cores" => \$::opt_use_cpus_instead_of_cores,
+ "sshlogin|S=s" => \@::opt_sshlogin,
+ "sshloginfile=s" => \$::opt_sshloginfile,
+ "controlmaster|M" => \$::opt_controlmaster,
+ "return=s" => \@::opt_return,
+ "trc=s" => \@::opt_trc,
+ "transfer" => \$::opt_transfer,
+ "cleanup" => \$::opt_cleanup,
+ "basefile|B=s" => \@::opt_basefile,
+ "workdir|W=s" => \$::opt_workdir,
+ "halt-on-error|H=s" => \$::opt_halt_on_error,
+ "retries=i" => \$::opt_retries,
+ "progress" => \$::opt_progress,
+ "eta" => \$::opt_eta,
+ "arg-sep|argsep=s" => \$::opt_arg_sep,
+ "arg-file-sep|argfilesep=s" => \$::opt_arg_file_sep,
+ "trim=s" => \$::opt_trim,
+ "profile|J=s" => \$::opt_profile,
+ # xargs-compatibility - implemented, man, unittest
+ "max-procs|P=s" => \$::opt_P,
+ "delimiter|d=s" => \$::opt_d,
+ "max-chars|s=i" => \$::opt_s,
+ "arg-file|a=s" => \@::opt_a,
+ "no-run-if-empty|r" => \$::opt_r,
+ "replace|i:s" => \$::opt_i,
+ "E=s" => \$::opt_E,
+ "eof|e:s" => \$::opt_E,
+ "max-args|n=i" => \$::opt_n,
+ "max-replace-args|N=i" => \$::opt_N,
+ "colsep|col-sep|C=s" => \$::opt_colsep,
+ "help|h" => \$::opt_help,
+ "L=i" => \$::opt_L,
+ "max-lines|l:i" => \$::opt_l,
+ "interactive|p" => \$::opt_p,
+ "verbose|t" => \$::opt_verbose,
+ "version|V" => \$::opt_version,
+ "show-limits" => \$::opt_show_limits,
+ "exit|x" => \$::opt_x,
+ # Semaphore
+ "semaphore" => \$::opt_semaphore,
+ "semaphoretimeout=i" => \$::opt_semaphoretimeout,
+ "semaphorename|id=s" => \$::opt_semaphorename,
+ "fg" => \$::opt_fg,
+ "bg" => \$::opt_bg,
+ "wait" => \$::opt_wait,
+ # Shebang #!/usr/bin/parallel -Yotheroptions
+ "Y|shebang|hashbang" => \$::opt_shebang,
+ "skip-first-line" => \$::opt_skip_first_line,
+ );
+ if(not $this_is_ARGV) {
+ @{$array_ref} = @::ARGV;
+ @::ARGV = @save_argv;
+ }
+ return @retval;
+}
+
+sub parse_options {
+ # Returns: N/A
+ # Defaults:
+ $Global::version = 20101115;
+ $Global::progname = 'parallel';
+ $Global::debug = 0;
+ $Global::verbose = 0;
+ $Global::grouped = 1;
+ $Global::keeporder = 0;
+ $Global::quoting = 0;
+ $Global::replacestring = '{}';
+ $Global::replace_no_ext = '{.}';
+ $/="\n";
+ $Global::ignore_empty = 0;
+ $Global::argfile = *STDIN;
+ $Global::interactive = 0;
+ $Global::stderr_verbose = 0;
+ $Global::default_simultaneous_sshlogins = 9;
+ $Global::exitstatus = 0;
+ $Global::halt_on_error_exitstatus = 0;
+ $Global::total_jobs = 0;
+ $Global::arg_sep = ":::";
+ $Global::arg_file_sep = "::::";
+ $Global::trim = 'n';
+
+ @ARGV=read_options();
+
+ if(defined @::opt_v) { $Global::verbose = $#::opt_v+1; } # Convert -v -v to v=2
+ $Global::debug = (defined $::opt_D);
+ if(defined $::opt_m) { $Global::xargs = 1; }
+ if(defined $::opt_X) { $Global::Xargs = 1; }
+ if(defined $::opt_silent) { $Global::verbose = 0; }
+ if(defined $::opt_k) { $Global::keeporder = 1; }
+ if(defined $::opt_g) { $Global::grouped = 1; }
+ if(defined $::opt_u) { $Global::grouped = 0; }
+ if(defined $::opt_c) { $Global::input_is_filename = 0; }
+ if(defined $::opt_f) { $Global::input_is_filename = 1; }
+ if(defined $::opt_0) { $/ = "\0"; }
+ if(defined $::opt_d) { my $e="sprintf \"$::opt_d\""; $/ = eval $e; }
+ if(defined $::opt_p) { $Global::interactive = $::opt_p; }
+ if(defined $::opt_q) { $Global::quoting = 1; }
+ if(defined $::opt_r) { $Global::ignore_empty = 1; }
+ if(defined $::opt_verbose) { $Global::stderr_verbose = 1; }
+ if(defined $::opt_I) { $Global::replacestring = $::opt_I; }
+ if(defined $::opt_U) { $Global::replace_no_ext = $::opt_U; }
+ if(defined $::opt_i and $::opt_i) { $Global::replacestring = $::opt_i; }
+ if(defined $::opt_E and $::opt_E) { $Global::end_of_file_string = $::opt_E; }
+ if(defined $::opt_n and $::opt_n) { $Global::max_number_of_args = $::opt_n; }
+ if(defined $::opt_N and $::opt_N) { $Global::max_number_of_args = $::opt_N; }
+ if(defined $::opt_help) { die_usage(); }
+ if(defined $::opt_colsep) { $Global::trim = 'lr'; }
+ if(defined $::opt_trim) { $Global::trim = $::opt_trim; }
+ if(defined $::opt_arg_sep) { $Global::arg_sep = $::opt_arg_sep; }
+ if(defined $::opt_arg_file_sep) { $Global::arg_file_sep = $::opt_arg_file_sep; }
+ if(defined $::opt_number_of_cpus) { print no_of_cpus(),"\n"; wait_and_exit(0); }
+ if(defined $::opt_number_of_cores) { print no_of_cores(),"\n"; wait_and_exit(0); }
+ if(defined $::opt_max_line_length_allowed) { print real_max_length(),"\n"; wait_and_exit(0); }
+ if(defined $::opt_version) { version(); wait_and_exit(0); }
+ if(defined $::opt_show_limits) { show_limits(); }
+ if(defined @::opt_sshlogin) { @Global::sshlogin = @::opt_sshlogin; }
+ if(defined $::opt_sshloginfile) { read_sshloginfile($::opt_sshloginfile); }
+ if(defined @::opt_return) { push @Global::ret_files, @::opt_return; }
+ if(defined $::opt_semaphore) { $Global::semaphore = 1; }
+ if(defined $::opt_semaphoretimeout) { $Global::semaphore = 1; }
+ if(defined $::opt_semaphorename) { $Global::semaphore = 1; }
+ if(defined $::opt_fg) { $Global::semaphore = 1; }
+ if(defined $::opt_bg) { $Global::semaphore = 1; }
+ if(defined $::opt_wait) { $Global::semaphore = 1; }
+ if(defined @::opt_trc) {
+ push @Global::ret_files, @::opt_trc;
+ $::opt_transfer = 1;
+ $::opt_cleanup = 1;
+ }
+ if(defined $::opt_L and $::opt_L or defined $::opt_l) {
+ $Global::max_lines = $::opt_l || $::opt_L || 1;
+ $Global::max_number_of_args ||= $Global::max_lines;
+ }
+
+ if(grep /^$Global::arg_sep$/o, @ARGV) {
+ # Deal with :::
+ @ARGV=read_args_from_command_line();
+ }
+
+ if(grep /^$Global::arg_file_sep$/o, @ARGV) {
+ # Deal with ::::
+ @ARGV=convert_argfiles_from_command_line_to_multiple_opt_a();
+ }
+
+ # must be done after ::: and :::: because they mess with @ARGV
+ $Global::input_is_filename ||= (@ARGV);
+
+ if(@::opt_a) {
+ # must be done after
+ # convert_argfiles_from_command_line_to_multiple_opt_a
+ if($#::opt_a == 0) {
+ # One -a => xargs compatibility
+ $Global::argfile = open_or_exit($::opt_a[0]);
+ if($::opt_skip_first_line) {
+ <$Global::argfile>; # Read first line and forget it
+ }
+ } else {
+ # Multiple -a => xapply style
+ argfiles_xapply_style();
+ }
+ }
+
+ if(($::opt_l || $::opt_L || $::opt_n || $::opt_N || $::opt_s ||
+ $::opt_colsep) and not ($::opt_m or $::opt_X)) {
+ # The options --max-line, -l, -L, --max-args, -n, --max-chars, -s
+ # do not make sense without -X or -m
+ # so default to -X
+ # Needs to be done after :::: and @opt_a, as they can set $::opt_N
+ $Global::Xargs = 1;
+ }
+
+ # Semaphore defaults
+ # Must be done before computing number of processes and max_line_length
+ # because when running as a semaphore GNU Parallel does not read args
+ $Global::semaphore ||= ($0 =~ m:(^|/)sem$:); # called as 'sem'
+ if($Global::semaphore) {
+        # A semaphore does not take input from either stdin or a file
+ $Global::argfile = open_or_exit("/dev/null");
+ unget_arg("");
+ $Semaphore::timeout = $::opt_semaphoretimeout || 0;
+ if(defined $::opt_semaphorename) {
+ $Semaphore::name = $::opt_semaphorename;
+ } else {
+ $Semaphore::name = `tty`;
+ chomp $Semaphore::name;
+ }
+ $Semaphore::fg = $::opt_fg;
+ $Semaphore::wait = $::opt_wait;
+ $Global::default_simultaneous_sshlogins = 1;
+ }
+
+ if(defined $::opt_eta) {
+ # must be done after opt_a because we need to read all args
+ $::opt_progress = $::opt_eta;
+ my @args = ();
+ while(more_arguments()) {
+ # This will read all arguments and compute $Global::total_jobs
+ push @args, get_arg();
+ }
+ unget_arg(@args);
+ }
+
+ if(@ARGV) {
+ if($Global::quoting) {
+ $Global::command = shell_quote(@ARGV);
+ } else {
+ $Global::command = join(" ", @ARGV);
+ }
+ }
+
+ parse_sshlogin();
+
+ if(remote_hosts() and ($Global::xargs or $Global::Xargs)
+ and not $::opt_N) {
+ # As we do not know the max line length on the remote machine
+ # long commands generated by xargs may fail
+ # If opt_N is set, it is probably safe
+ print STDERR ("Warning: using -X or -m with --sshlogin may fail\n");
+ }
+
+ # Needs to be done after setting $Global::command and $Global::command_line_max_len
+ # as '-m' influences the number of commands that needs to be run
+ if(defined $::opt_P) {
+ compute_number_of_processes_for_sshlogins();
+ } else {
+ for my $sshlogin (keys %Global::host) {
+ $Global::host{$sshlogin}{'max_no_of_running'} =
+ $Global::default_simultaneous_sshlogins;
+ }
+ }
+
+ if(-t $Global::argfile) {
+ print STDERR "$Global::progname: Input is tty. Press CTRL-D to exit.\n";
+ }
+}
+
+sub read_options {
+ # Read options from command line, profile and $PARALLEL
+ # Returns:
+ # @ARGV without --options
+ # This must be done first as this may exec myself
+ if(defined $ARGV[0] and ($ARGV[0]=~/^-Y/ or $ARGV[0]=~/^--shebang / or
+ $ARGV[0]=~/^--hashbang /)) {
+ # Program is called from #! line in script
+ $ARGV[0]=~s/^-Y( |$)//; # remove -Y if on its own
+ $ARGV[0]=~s/^-Y/-/; # remove -Y if bundled with other options
+ $ARGV[0]=~s/^--shebang *//; # remove --shebang if it is set
+ $ARGV[0]=~s/^--hashbang *//; # remove --hashbang if it is set
+ my $argfile = pop @ARGV;
+ # exec myself to split $ARGV[0] into separate fields
+ exec "$0 --skip-first-line -a $argfile @ARGV";
+ }
+
+ Getopt::Long::Configure("bundling","pass_through");
+ # Check if there is a --profile to set $::opt_profile
+ GetOptions("profile|J=s" => \$::opt_profile) || die_usage();
+ # Add options from .parallel/config and other profiles
+ my @ARGV_profile = ();
+ my @ARGV_env = ();
+ my @config_profiles = ($ENV{'HOME'}."/.parallel/config",
+ $ENV{'HOME'}."/.parallelrc");
+ my @profiles = @config_profiles;
+ if($::opt_profile) {
+ # --profile overrides default profiles
+ @profiles = ($ENV{'HOME'}."/.parallel/".$::opt_profile);
+ }
+ for my $profile (@profiles) {
+ if(-r $profile) {
+ open (IN, "<", $profile) || die;
+ while(<IN>) {
+ /^\s*\#/ and next;
+ chomp;
+ push @ARGV_profile, shell_unquote(split/(?<![\\])\s/, $_);
+ }
+ close IN;
+ } else {
+ if(grep /^$profile$/, @config_profiles) {
+ # config file is not required to exist
+ } else {
+ print STDERR "$profile not readable\n";
+ wait_and_exit(255);
+ }
+ }
+ }
+ Getopt::Long::Configure("bundling","require_order");
+ get_options_from_array(\@ARGV_profile) || die_usage();
+ # Add options from shell variable $PARALLEL
+ $ENV{'PARALLEL'} and @ARGV_env = shell_unquote(split/(?<![\\])\s/, $ENV{'PARALLEL'});
+ get_options_from_array(\@ARGV_env) || die_usage();
+ get_options_from_array(\@ARGV) || die_usage();
+
+ # Prepend non-options to @ARGV (such as commands like 'nice')
+ unshift @ARGV, @ARGV_profile, @ARGV_env;
+ return @ARGV;
+}
+
+sub read_args_from_command_line {
+ # Arguments given on the command line after ::: ($Global::arg_sep)
+ # Removes the arguments from @ARGV and puts it into the argument queue
+ # Ignore STDIN by reading from /dev/null
+ # or another file if user has given --arg-file
+ # Returns:
+ # @ARGV without ::: and following args
+ if(not @::opt_a) { push @::opt_a, "/dev/null"; }
+ # Input: @ARGV = command option ::: arg arg arg
+ my @new_argv = ();
+ while(@ARGV) {
+ my $arg = shift @ARGV;
+ if($arg eq $Global::arg_sep) {
+ $Global::input_is_filename = (@new_argv);
+ while(@ARGV) {
+ my $arg = shift @ARGV;
+ if($Global::end_of_file_string and
+ $arg eq $Global::end_of_file_string) {
+ # Ignore the rest of ARGV
+ @ARGV=();
+ }
+ if($Global::ignore_empty) {
+ if($arg =~ /^\s*$/) { next; }
+ }
+ if($Global::max_lines and $#ARGV >=0) {
+ if($arg =~ /\s$/) {
+ # Trailing space => continued on next line
+ $arg .= shift @ARGV;
+ }
+ }
+ unget_argv($arg);
+ $Global::total_jobs++;
+ }
+ last;
+ } else {
+ push @new_argv, $arg;
+ }
+ }
+ # Output: @ARGV = command option
+ return @new_argv;
+}
+
+sub convert_argfiles_from_command_line_to_multiple_opt_a {
+ # Convert :::: to multiple -a
+ # Remove :::: from @ARGV and move the following arguments to @::opt_a
+ # Returns:
+ # @ARGV without :::: and following args
+ my @new_argv = ();
+ my @argument_files;
+ while(@ARGV) {
+ my $arg = shift @ARGV;
+ if($arg eq $Global::arg_file_sep) {
+ @argument_files = @ARGV;
+ @ARGV=();
+ } else {
+ push @new_argv, $arg;
+ }
+ }
+ # Output: @ARGV = command option
+ push @::opt_a, @argument_files;
+ return @new_argv;
+}
+
+sub argfiles_xapply_style {
+ # Multiple -a => xapply style
+ # Convert the n files into one queue
+ # Every n'th entry is from the same file
+ # Set opt_N to read n entries per invocation
+ # Returns: N/A
+ $Global::argfile = open_or_exit("/dev/null");
+ $::opt_N = $#::opt_a+1;
+ $Global::max_number_of_args = $#::opt_a+1;
+ # read the files
+ my @content;
+ my $max_lineno = 0;
+ my $in_fh = gensym;
+ for (my $fileno = 0; $fileno <= $#::opt_a; $fileno++) {
+ $in_fh = open_or_exit($::opt_a[$fileno]);
+ if($::opt_skip_first_line and $fileno == 0) {
+ <$in_fh>; # Read first line and forget it
+ }
+ for (my $lineno=0;
+ $content[$fileno][$lineno] = get_arg($in_fh);
+ $lineno++) {
+ $max_lineno = max($max_lineno,$lineno);
+ }
+ close $in_fh;
+ }
+ for (my $lineno=0; $lineno <= $max_lineno; $lineno++) {
+ for (my $fileno = 0; $fileno <= $#::opt_a; $fileno++) {
+ my $arg = $content[$fileno][$lineno];
+ if($Global::trim ne 'n') {
+ $arg = trim($arg);
+ }
+ if(defined $arg) {
+ unget_arg($arg);
+ } else {
+ unget_arg("");
+ }
+ }
+ }
+ $Global::total_jobs += $max_lineno;
+}
+
+sub open_or_exit {
+ # Returns:
+ # file handle to read-opened file
+ # exits if file cannot be opened otherwise
+ my $file = shift;
+ my $fh = gensym;
+ if(not open($fh,"<",$file)) {
+ print STDERR "$Global::progname: ".
+ "Cannot open input file `$file': ".
+ "No such file or directory\n";
+ wait_and_exit(255);
+ }
+ return $fh;
+}
+
+sub cleanup {
+ # Returns: N/A
+ if(@::opt_basefile) {
+ cleanup_basefile();
+ }
+}
+
+#
+# Generating the command line
+#
+
+sub no_extension {
+ # Returns:
+ # argument with .extension removed if any
+ my $no_ext = shift;
+ $no_ext =~ s:\.[^/\.]*$::; # Remove .ext from argument
+ return $no_ext;
+}
+
+sub trim {
+    # Removes white space as specified by --trim:
+ # n = nothing
+ # l = start
+ # r = end
+ # lr|rl = both
+ # Returns:
+ # string with white space removed as needed
+ my (@strings) = map { defined $_ ? $_ : "" } (@_);
+ my $arg;
+ if($Global::trim eq "n") {
+ # skip
+ } elsif($Global::trim eq "l") {
+ for $arg (@strings) { $arg =~ s/^\s+//; }
+ } elsif($Global::trim eq "r") {
+ for $arg (@strings) { $arg =~ s/\s+$//; }
+ } elsif($Global::trim eq "rl" or $Global::trim eq "lr") {
+ for $arg (@strings) { $arg =~ s/^\s+//; $arg =~ s/\s+$//; }
+ } else {
+ print STDERR "$Global::progname: --trim must be one of: r l rl lr\n";
+ wait_and_exit(255);
+ }
+ return wantarray ? @strings : "@strings";
+}
+
+
+sub generate_command_line {
+ # Returns:
+ # the full job line to run
+ # list of quoted arguments on that line
+ my $command = shift;
+ my ($job_line,$last_good);
+ my ($quoted_args,$quoted_args_no_ext) =
+ get_multiple_args($command,max_length_of_command_line(),0);
+ my $is_substituted = 0;
+
+ if(@$quoted_args) {
+ $job_line = $command;
+ if(defined $job_line and
+ ($job_line =~/\Q$Global::replacestring\E/o or
+ $job_line =~/\Q$Global::replace_no_ext\E/o)) {
+ # substitute {} and {.} with args
+ if($Global::Xargs) {
+ # Context sensitive replace (foo{}bar with fooargsbar)
+ $job_line =
+ context_replace($job_line, $quoted_args, $quoted_args_no_ext);
+ } else {
+ # Normal replace {} with args and {.} with args without extension
+ my $arg=join(" ",@$quoted_args);
+ my $arg_no_ext=join(" ",@$quoted_args_no_ext);
+ $job_line =~ s/\Q$Global::replacestring\E/$arg/go;
+ $job_line =~ s/\Q$Global::replace_no_ext\E/$arg_no_ext/go;
+ }
+ $is_substituted = 1;
+ }
+ if(defined $job_line and $::opt_N) {
+ if($job_line =~/\{\d+\}/o) {
+ # substitute {#} with args
+ for my $argno (1..$::opt_N) {
+ my $arg = $quoted_args->[$argno-1];
+ if(defined $arg) {
+ $job_line =~ s/\{$argno\}/$arg/g;
+ } else {
+ $job_line =~ s/\{$argno\}//g;
+ }
+ }
+ $is_substituted = 1;
+ }
+ if($job_line =~/\{\d+\.\}/o) {
+ # substitute {#.} with args
+ for my $argno (1..$::opt_N) {
+ my $arg = no_extension($quoted_args->[$argno-1]);
+ if(defined $arg) {
+ $job_line =~ s/\{$argno\.\}/$arg/g;
+ } else {
+ $job_line =~ s/\{$argno\.\}//g;
+ }
+ }
+ $is_substituted = 1;
+ }
+ }
+ if (not $is_substituted) {
+ # append args
+ my $arg=join(" ",@$quoted_args);
+ if($job_line) {
+ $job_line .= " ".$arg;
+ } else {
+ # Parallel behaving like '|sh'
+ $job_line = $arg;
+ }
+ }
+ debug("Return jobline(",length($job_line),"): !",$job_line,"!\n");
+ }
+ return ($job_line,$quoted_args);
+}
+
+sub get_multiple_args {
+ # Returns:
+ # \@quoted_args - empty if no more args
+ # \@quoted_args_no_ext
+ my ($command,$max_length_of_command_line,$test_only_mode) = (@_);
+ my ($next_arg,@quoted_args,@quoted_args_no_ext,$arg_length);
+ my ($number_of_substitution,
+ $number_of_substitution_no_ext,$spaces,
+ $length_of_command_no_args,$length_of_context) =
+ xargs_computations($command);
+ my $number_of_args = 0;
+ while (defined($next_arg = get_arg())) {
+ my $next_arg_no_ext = no_extension($next_arg);
+ push (@quoted_args, $next_arg);
+ push (@quoted_args_no_ext, $next_arg_no_ext);
+ $number_of_args++;
+
+ # Emulate xargs if there is a command and -x or -X is set
+ my $next_arg_len =
+ $number_of_substitution * (length ($next_arg) + $spaces)
+ + $number_of_substitution_no_ext * (length ($next_arg_no_ext) + $spaces)
+ + $length_of_context;
+ $arg_length += $next_arg_len;
+ my $job_line_length = $length_of_command_no_args + $arg_length;
+ if($job_line_length >= $max_length_of_command_line) {
+ unget_arg(pop @quoted_args);
+ pop @quoted_args_no_ext;
+ if($test_only_mode) {
+ last;
+ }
+ if($::opt_x and $length_of_command_no_args + $next_arg_len
+ >= $max_length_of_command_line) {
+ # To be compatible with xargs -x
+ print STDERR ("Command line too long ($job_line_length >= "
+ . $max_length_of_command_line .
+ ") at number $number_of_args: ".
+ (substr($next_arg,0,50))."...\n");
+ wait_and_exit(255);
+ }
+ if(defined $quoted_args[0]) {
+ last;
+ } else {
+ print STDERR ("Command line too long ($job_line_length >= "
+ . $max_length_of_command_line .
+ ") at number $number_of_args: ".
+ (substr($next_arg,0,50))."...\n");
+ wait_and_exit(255);
+ }
+ }
+ if($Global::max_number_of_args and
+ $number_of_args >= $Global::max_number_of_args) {
+ last;
+ }
+ if(not $Global::xargs and not $Global::Xargs) {
+ # No xargs-mode: Just one argument per line
+ last;
+ }
+ }
+ return (\@quoted_args,\@quoted_args_no_ext);
+}
+
+
+sub xargs_computations {
+ # Returns:
+ # $number_of_substitution = number of {}'s
+ # $number_of_substitution_no_ext = number of {.}'s
+ # $spaces = is a single space needed at the start?
+ # $length_of_command_no_args = length of command line with args removed
+ # $length_of_context = context needed for each additional arg
+
+ my $command = shift;
+ if(not @Calculated::xargs_computations) {
+ my ($number_of_substitution, $number_of_substitution_no_ext,
+ $spaces,$length_of_command_no_args,$length_of_context)
+ = (1,0,0,0,0);
+ if($command) {
+ if($command !~ /\s\S*\Q$Global::replacestring\E\S*|\s\S*\Q$Global::replace_no_ext\E\S*/o) {
+ # No replacement strings: add {}
+ $command .= " ".$Global::replacestring;
+ }
+ # Count number of {}'s on the command line
+ my $no_of_replace =
+ ($command =~ s/\Q$Global::replacestring\E/$Global::replacestring/go);
+ $number_of_substitution = $no_of_replace || 1;
+ # Count number of {.}'s on the command line
+ my $no_of_no_ext =
+ ($command =~ s/\Q$Global::replace_no_ext\E/$Global::replace_no_ext/go);
+ $number_of_substitution_no_ext = $no_of_no_ext || 0;
+ # Count
+ my $c = $command;
+ if($Global::Xargs) {
+ $c =~ s/\s\S*\Q$Global::replacestring\E\S*|\s\S*\Q$Global::replace_no_ext\E\S*//go;
+ $length_of_command_no_args = length($c);
+ $length_of_context = length($command) - $length_of_command_no_args
+ - $no_of_replace * length($Global::replacestring)
+ - $no_of_no_ext * length($Global::replace_no_ext);
+ $spaces = 0;
+ debug("length_of_command_no_args ",$length_of_command_no_args,"\n");
+ debug("length_of_context ",$length_of_context,"\n");
+ debug("no_of_replace ",$no_of_replace," no_of_no_ext ",$no_of_no_ext,"\n");
+ } else {
+ # remove all {}s
+ $c =~ s/\Q$Global::replacestring\E|\Q$Global::replace_no_ext\E//og;
+ $length_of_command_no_args = length($c) -
+ $no_of_replace - $no_of_no_ext;
+ $length_of_context = 0;
+ $spaces = 1;
+ }
+ }
+ @Calculated::xargs_computations =
+ ($number_of_substitution, $number_of_substitution_no_ext,
+ $spaces,$length_of_command_no_args,$length_of_context);
+ }
+ return (@Calculated::xargs_computations);
+}
+
+
+sub shell_quote {
+ # Quote the string so shell will not expand any special chars
+ # Returns:
+ # string quoted with \ as needed by the shell
+ my (@strings) = (@_);
+ my $arg;
+ for $arg (@strings) {
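+        # Escape backslashes first, then the other shell metacharacters,
+        # control characters and newlines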
+ $arg =~ s/\\/\\\\/g;
+
+ $arg =~ s/([\#\?\`\(\)\*\>\<\~\|\; \"\!\$\&\'])/\\$1/g;
+ $arg =~ s/([\002-\011\013-\032])/\\$1/g;
+ $arg =~ s/([\n])/'\n'/g; # filenames with '\n' is quoted using \'
+ }
+ return wantarray ? @strings : "@strings";
+}
+
+
+sub shell_unquote {
+ # Unquote strings from shell_quote
+ # Returns:
+ # string with shell quoting removed
+ my (@strings) = (@_);
+ my $arg;
+ for $arg (@strings) {
+ $arg =~ s/'\n'/\n/g; # filenames with '\n' is quoted using \'
+ $arg =~ s/\\([\002-\011\013-\032])/$1/g;
+ $arg =~ s/\\([\#\?\`\(\)\*\>\<\~\|\; \"\!\$\&\'])/$1/g;
+ $arg =~ s/\\\\/\\/g;
+ }
+ return wantarray ? @strings : "@strings";
+}
+
+
+sub context_replace {
+ # Replace foo{}bar or foo{.}bar
+ # Returns:
+ # jobline with {} and {.} expanded to args
+ my ($job_line,$quoted,$no_ext) = (@_);
+ while($job_line =~/\Q$Global::replacestring\E|\Q$Global::replace_no_ext\E/o) {
+ $job_line =~ /(\S*(\Q$Global::replacestring\E|\Q$Global::replace_no_ext\E)\S*)/o
+ or die ("This should never happen");
+ my $wordarg = $1; # This is the context that needs to be substituted
+ my @all_word_arg;
+ for my $n (0 .. $#$quoted) {
+ my $arg = $quoted->[$n];
+ my $arg_no_ext = $no_ext->[$n];
+ my $substituted = $wordarg;
+ $substituted=~s/\Q$Global::replacestring\E/$arg/go;
+ $substituted=~s/\Q$Global::replace_no_ext\E/$arg_no_ext/go;
+ push @all_word_arg, $substituted;
+ }
+ my $all_word_arg = join(" ",@all_word_arg);
+ $job_line =~ s/\Q$wordarg\E/$all_word_arg/;
+ }
+ return $job_line;
+}
+
+sub __NUMBER_OF_PROCESSES_FILEHANDLES_MAX_LENGTH_OF_COMMAND_LINE__ {}
+
+# Maximal command line length (for -m and -X)
+sub max_length_of_command_line {
+ # Find the max_length of a command line
+ # Returns:
+ # number of chars on the longest command line allowed
+ if(not $Private::command_line_max_len) {
+ $Private::command_line_max_len = limited_max_length();
+ if($::opt_s) {
+ if($::opt_s <= $Private::command_line_max_len) {
+ $Private::command_line_max_len = $::opt_s;
+ } else {
+ print STDERR "$Global::progname: ",
+ "value for -s option should be < $Private::command_line_max_len\n";
+ }
+ }
+ }
+ return $Private::command_line_max_len;
+}
+
+sub max_length_limited_by_opt_s {
+ # Returns:
+ # min(opt_s, number of chars on the longest command line allowed)
+ if(is_acceptable_command_line_length($::opt_s)) {
+ debug("-s is OK: ",$::opt_s,"\n");
+ return $::opt_s;
+ }
+ # -s is too long: Find the correct
+ return binary_find_max_length(0,$::opt_s);
+}
+
+sub limited_max_length {
+ # Returns:
+ # min(opt_s, number of chars on the longest command line allowed)
+ if($::opt_s) { return max_length_limited_by_opt_s() }
+
+ return real_max_length();
+}
+
+sub real_max_length {
+ # Returns:
+ # The maximal command line length
+    # Use an upper bound of 8 MB if the shell allows for infinitely long lengths
+ my $upper = 8_000_000;
+ my $len = 8;
+ do {
+ if($len > $upper) { return $len };
+ $len *= 16;
+ } while (is_acceptable_command_line_length($len));
+ # Then search for the actual max length between 0 and upper bound
+ return binary_find_max_length(int($len/16),$len);
+}
+
+sub binary_find_max_length {
+ # Given a lower and upper bound find the max_length of a command line
+ # Returns:
+ # number of chars on the longest command line allowed
+ my ($lower, $upper) = (@_);
+ if($lower == $upper or $lower == $upper-1) { return $lower; }
+ my $middle = int (($upper-$lower)/2 + $lower);
+ debug("Maxlen: $lower,$upper,$middle\n");
+ if (is_acceptable_command_line_length($middle)) {
+ return binary_find_max_length($middle,$upper);
+ } else {
+ return binary_find_max_length($lower,$middle);
+ }
+}
+
+sub is_acceptable_command_line_length {
+ # Test if a command line of this length can run
+ # Returns:
+ # 0 if the command line length is too long
+ # 1 otherwise
+ my $len = shift;
+
+ $Private::is_acceptable_command_line_length++;
+ debug("$Private::is_acceptable_command_line_length $len\n");
+ local *STDERR;
+ open (STDERR,">/dev/null");
+ system "true "."x"x$len;
+ close STDERR;
+ debug("$len $?\n");
+ return not $?;
+}
+
+# Number of parallel processes to run
+
+sub compute_number_of_processes_for_sshlogins {
+ for my $sshlogin (keys %Global::host) {
+ $Global::host{$sshlogin}{'max_no_of_running'} =
+ compute_number_of_processes($::opt_P,$sshlogin);
+ }
+}
+
+sub compute_number_of_processes {
+ # Number of processes wanted and limited by system resources
+ # Returns:
+ # Number of processes
+ my $opt_P = shift;
+ my $sshlogin = shift;
+ my $wanted_processes = user_requested_processes($opt_P,$sshlogin);
+ debug("Wanted procs: $wanted_processes\n");
+ my $system_limit =
+ processes_available_by_system_limit($wanted_processes,$sshlogin);
+ debug("Limited to procs: $system_limit\n");
+ return $system_limit;
+}
+
+sub processes_available_by_system_limit {
+ # If the wanted number of processes is bigger than the system limits:
+ # Limit them to the system limits
+    # Limits are: File handles, number of input lines, and processes;
+    # a warning is printed if spawning 10 extra processes takes > 2 seconds
+ # Returns:
+ # Number of processes
+
+ my $wanted_processes = shift;
+ my $sshlogin = shift;
+ my $system_limit=0;
+ my @command_lines=();
+ my ($next_command_line, $args_ref);
+ my $more_filehandles;
+ my $max_system_proc_reached=0;
+ my $slow_spawining_warning_printed=0;
+ my $time = time;
+ my %fh;
+ my @children;
+ do_not_reap();
+
+ # Reserve filehandles
+ # perl uses 7 filehandles for something?
+ # parallel uses 1 for memory_usage
+ for my $i (1..8) {
+ open($fh{"init-$i"},"</dev/null");
+ }
+ do {
+ $system_limit++;
+
+ if(not $Global::semaphore) {
+ # If there are no more command lines, then we have a process
+ # per command line, so no need to go further
+ ($next_command_line, $args_ref) = get_command_line();
+ if(defined $next_command_line) {
+ push(@command_lines, $next_command_line, $args_ref);
+ }
+ }
+
+ # Every simultaneous process uses 2 filehandles when grouping
+ $more_filehandles = open($fh{$system_limit*2},"</dev/null")
+ && open($fh{$system_limit*2+1},"</dev/null");
+
+ # System process limit
+ $system_limit % 10 or $time=time;
+ my $child;
+ if($child = fork()) {
+ push (@children,$child);
+ } elsif(defined $child) {
+ # The child takes one process slot
+ # It will be killed later
+ sleep 100000;
+ wait_and_exit(0);
+ } else {
+ $max_system_proc_reached = 1;
+ }
+ debug("Time to fork ten procs: ", time-$time, " (processes so far: ", $system_limit,")\n");
+ if(time-$time > 2 and not $slow_spawining_warning_printed) {
+            # It took more than 2 seconds to fork ten processes.
+            # Give the user a warning. They can press Ctrl-C if this
+            # is too slow.
+ print STDERR ("Warning: Starting 10 extra processes takes > 2 sec.\n",
+ "Consider adjusting -j. Press CTRL-C to stop.\n");
+ $slow_spawining_warning_printed = 1;
+ }
+ } while($system_limit < $wanted_processes
+ and (defined $next_command_line or $Global::semaphore)
+ and $more_filehandles
+ and not $max_system_proc_reached);
+ if($system_limit < $wanted_processes and not $more_filehandles) {
+ print STDERR ("Warning: Only enough filehandles to run ",
+ $system_limit, " jobs in parallel. ",
+ "Raising ulimit -n may help\n");
+ }
+ if($system_limit < $wanted_processes and $max_system_proc_reached) {
+ print STDERR ("Warning: Only enough available processes to run ",
+ $system_limit, " jobs in parallel.\n");
+ }
+ # Cleanup: Close the files
+ for (values %fh) { close $_ }
+ # Cleanup: Kill the children
+ for my $pid (@children) {
+ kill 9, $pid;
+ waitpid($pid,0);
+ }
+ wait();
+ # Cleanup: Unget the command_lines (and args_refs)
+ unget_command_line(@command_lines);
+ if($sshlogin ne ":" and
+ $system_limit > $Global::default_simultaneous_sshlogins) {
+ $system_limit =
+ simultaneous_sshlogin_limit($sshlogin,$system_limit);
+ }
+ return $system_limit;
+}
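+
+# Illustrative note: processes_available_by_system_limit() finds the practical
+# limit by actually consuming resources: for each prospective job it opens 2
+# filehandles on /dev/null and forks one sleeping child. If, say, -j 500 is
+# requested but fork() starts failing after 300 children, $system_limit stops
+# at 300 and the warning above is printed. The probe children and filehandles
+# are cleaned up before the limit is returned.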
+
+sub simultaneous_sshlogin {
+ # Using $sshlogin try to see if we can do $wanted_processes
+ # simultaneous logins
+ # (ssh host echo simultaneouslogin & ssh host echo simultaneouslogin & ...)|grep simul|wc -l
+ # Returns:
+    #   Number of successful logins
+ my $sshlogin = shift;
+ my $wanted_processes = shift;
+ my ($sshcmd,$serverlogin) = sshcommand_of_sshlogin($sshlogin);
+ my $cmd = "$sshcmd $serverlogin echo simultaneouslogin 2>&1 &"x$wanted_processes;
+ debug("Trying $wanted_processes logins at $serverlogin");
+ open (SIMUL, "($cmd)|grep simultaneouslogin | wc -l|") or die;
+ my $ssh_limit = <SIMUL>;
+ close SIMUL;
+ chomp $ssh_limit;
+ return $ssh_limit;
+}
+
+sub simultaneous_sshlogin_limit {
+ # Test by logging in wanted number of times simultaneously
+ # Returns:
+ # min($wanted_processes,$working_simultaneous_ssh_logins-1)
+ my $sshlogin = shift;
+ my $wanted_processes = shift;
+ my ($sshcmd,$serverlogin) = sshcommand_of_sshlogin($sshlogin);
+ # Try twice because it guesses wrong sometimes
+ # Choose the minimal
+ my $ssh_limit =
+ min(simultaneous_sshlogin($sshlogin,$wanted_processes),
+ simultaneous_sshlogin($sshlogin,$wanted_processes));
+ if($ssh_limit < $wanted_processes) {
+ print STDERR
+ ("Warning: ssh to $serverlogin only allows ",
+ "for $ssh_limit simultaneous logins.\n",
+ "You may raise this by changing ",
+         "/etc/ssh/sshd_config:MaxStartups on $serverlogin\n",
+ "Using only ",$ssh_limit-1," connections ",
+ "to avoid race conditions\n");
+ }
+    # A race condition can cause problems if all ssh connections are used.
+ if($ssh_limit > 1) { $ssh_limit -= 1; }
+ return $ssh_limit;
+}
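+
+# Illustrative example (assuming sshcommand_of_sshlogin() yields sshcmd "ssh"
+# and serverlogin "server"): with $wanted_processes = 3 the probe command is
+# roughly
+#   (ssh server echo simultaneouslogin 2>&1 & ssh server echo simultaneouslogin
+#    2>&1 & ssh server echo simultaneouslogin 2>&1 &) | grep simultaneouslogin | wc -l
+# so the count of echoed lines is the number of logins sshd accepted, and one
+# connection is held back to avoid races when every slot is in use.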
+
+sub enough_file_handles {
+ # check that we have enough filehandles available for starting
+ # another job
+ # Returns:
+ # 1 if ungrouped (thus not needing extra filehandles)
+ # 0 if too few filehandles
+ # 1 if enough filehandles
+ if($Global::grouped) {
+ my %fh;
+ my $enough_filehandles = 1;
+ # We need a filehandle for STDOUT and STDERR
+ # open3 uses 2 extra filehandles temporarily
+ for my $i (1..4) {
+ $enough_filehandles &&= open($fh{$i},"</dev/null");
+ }
+ for (values %fh) { close $_; }
+ return $enough_filehandles;
+ } else {
+ return 1;
+ }
+}
+
+sub user_requested_processes {
+ # Parse the number of processes that the user asked for using -j
+ # Returns:
+ # the number of processes to run on this sshlogin
+ my $opt_P = shift;
+ my $sshlogin = shift;
+ my $processes;
+ if(defined $opt_P) {
+ if($opt_P =~ /^\+(\d+)$/) {
+ # E.g. -P +2
+ my $j = $1;
+ $processes =
+ no_of_processing_units_sshlogin($sshlogin) + $j;
+ } elsif ($opt_P =~ /^-(\d+)$/) {
+ # E.g. -P -2
+ my $j = $1;
+ $processes =
+ no_of_processing_units_sshlogin($sshlogin) - $j;
+ } elsif ($opt_P =~ /^(\d+)\%$/) {
+ my $j = $1;
+ $processes =
+ no_of_processing_units_sshlogin($sshlogin) * $j / 100;
+ } elsif ($opt_P =~ /^(\d+)$/) {
+ $processes = $1;
+ if($processes == 0) {
+ # -P 0 = infinity (or at least close)
+ $processes = 2**31;
+ }
+ } elsif (-f $opt_P) {
+ $Global::max_procs_file = $opt_P;
+ $Global::max_procs_file_last_mod = (stat($Global::max_procs_file))[9];
+ if(open(IN, $Global::max_procs_file)) {
+ my $opt_P_file = join("",<IN>);
+ close IN;
+ $processes = user_requested_processes($opt_P_file);
+ } else {
+ print STDERR "Cannot open $opt_P\n";
+ exit(255);
+ }
+ } else {
+ print STDERR "Parsing of --jobs/-j/--max-procs/-P failed\n";
+ die_usage();
+ }
+ if($processes < 1) {
+ $processes = 1;
+ }
+ }
+ return $processes;
+}
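+
+# Illustrative examples of the -j/-P parsing above, where N is
+# no_of_processing_units_sshlogin($sshlogin):
+#   -j 8    => 8 jobs
+#   -j +2   => N + 2 jobs
+#   -j -1   => N - 1 jobs (never less than 1)
+#   -j 50%  => N * 50 / 100 jobs
+#   -j 0    => 2**31 jobs (effectively unlimited)
+#   -j FILE => the number is read from FILE (any existing file) and
+#              re-read when the file changes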
+
+sub no_of_processing_units_sshlogin {
+ # Number of processing units (CPUs or cores) at this sshlogin
+ # Returns:
+ # number of CPUs or cores at the sshlogin
+ my $sshlogin = shift;
+ my ($sshcmd,$serverlogin) = sshcommand_of_sshlogin($sshlogin);
+ if(not $Global::host{$sshlogin}{'ncpus'}) {
+ if($serverlogin eq ":") {
+ if($::opt_use_cpus_instead_of_cores) {
+ $Global::host{$sshlogin}{'ncpus'} = no_of_cpus();
+ } else {
+ $Global::host{$sshlogin}{'ncpus'} = no_of_cores();
+ }
+ } else {
+ my $ncpu;
+ if($::opt_use_cpus_instead_of_cores) {
+ $ncpu = qx(echo|$sshcmd $serverlogin parallel --number-of-cpus);
+ chomp($ncpu);
+ } else {
+ $ncpu = qx(echo|$sshcmd $serverlogin parallel --number-of-cores);
+ chomp($ncpu);
+ }
+ if($ncpu =~ /^[0-9]+$/) {
+ $Global::host{$sshlogin}{'ncpus'} = $ncpu;
+ } else {
+ print STDERR ("Warning: Could not figure out ",
+                          "number of cpus on $serverlogin. Using 1\n");
+ $Global::host{$sshlogin}{'ncpus'} = 1;
+ }
+ }
+ }
+ return $Global::host{$sshlogin}{'ncpus'};
+}
+
+sub no_of_cpus {
+ # Returns:
+ # Number of physical CPUs
+ if(not $Private::no_of_cpus) {
+ local $/="\n"; # If delimiter is set, then $/ will be wrong
+ my $no_of_cpus = (no_of_cpus_freebsd()
+ || no_of_cpus_darwin()
+ || no_of_cpus_solaris()
+ || no_of_cpus_gnu_linux()
+ );
+ if($no_of_cpus) {
+ $Private::no_of_cpus = $no_of_cpus;
+ } else {
+ warn("Cannot figure out number of cpus. Using 1");
+ $Private::no_of_cpus = 1;
+ }
+ }
+ return $Private::no_of_cpus;
+}
+
+sub no_of_cores {
+ # Returns:
+ # Number of CPU cores
+ if(not $Private::no_of_cores) {
+ local $/="\n"; # If delimiter is set, then $/ will be wrong
+ my $no_of_cores = (no_of_cores_freebsd()
+ || no_of_cores_darwin()
+ || no_of_cores_solaris()
+ || no_of_cores_gnu_linux()
+ );
+ if($no_of_cores) {
+ $Private::no_of_cores = $no_of_cores;
+ } else {
+ warn("Cannot figure out number of CPU cores. Using 1");
+ $Private::no_of_cores = 1;
+ }
+ }
+ return $Private::no_of_cores;
+}
+
+sub no_of_cpus_gnu_linux {
+ # Returns:
+ # Number of physical CPUs on GNU/Linux
+ my $no_of_cpus;
+ if(-e "/proc/cpuinfo") {
+ $no_of_cpus = 0;
+ my %seen;
+ open(IN,"cat /proc/cpuinfo|") || return undef;
+ while(<IN>) {
+ if(/^physical id.*[:](.*)/ and not $seen{$1}++) {
+ $no_of_cpus++;
+ }
+ }
+ close IN;
+ }
+ return $no_of_cpus;
+}
+
+sub no_of_cores_gnu_linux {
+ # Returns:
+ # Number of CPU cores on GNU/Linux
+ my $no_of_cores;
+ if(-e "/proc/cpuinfo") {
+ $no_of_cores = 0;
+ open(IN,"cat /proc/cpuinfo|") || return undef;
+ while(<IN>) {
+ /^processor.*[:]/ and $no_of_cores++;
+ }
+ close IN;
+ }
+ return $no_of_cores;
+}
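+
+# Illustrative example: on a machine with 2 sockets of 4 cores each,
+# /proc/cpuinfo holds 8 "processor : ..." entries but only 2 distinct
+# "physical id" values, e.g.
+#   physical id     : 0
+#   physical id     : 1
+# so no_of_cpus_gnu_linux() returns 2 and no_of_cores_gnu_linux() returns 8.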
+
+sub no_of_cpus_darwin {
+ # Returns:
+ # Number of physical CPUs on Mac Darwin
+ my $no_of_cpus = `sysctl -a hw 2>/dev/null | grep -w physicalcpu | awk '{ print \$2 }'`;
+ return $no_of_cpus;
+}
+
+sub no_of_cores_darwin {
+ # Returns:
+ # Number of CPU cores on Mac Darwin
+ my $no_of_cores = `sysctl -a hw 2>/dev/null | grep -w logicalcpu | awk '{ print \$2 }'`;
+ return $no_of_cores;
+}
+
+sub no_of_cpus_freebsd {
+ # Returns:
+ # Number of physical CPUs on FreeBSD
+ my $no_of_cpus = `sysctl hw.ncpu 2>/dev/null | awk '{ print \$2 }'`;
+ return $no_of_cpus;
+}
+
+sub no_of_cores_freebsd {
+ # Returns:
+ # Number of CPU cores on FreeBSD
+ my $no_of_cores = `sysctl -a hw 2>/dev/null | grep -w logicalcpu | awk '{ print \$2 }'`;
+ return $no_of_cores;
+}
+
+sub no_of_cpus_solaris {
+ # Returns:
+ # Number of physical CPUs on Solaris
+ if(-x "/usr/sbin/psrinfo") {
+ my @psrinfo = `/usr/sbin/psrinfo`;
+ if($#psrinfo >= 0) {
+ return $#psrinfo +1;
+ }
+ }
+ if(-x "/usr/sbin/prtconf") {
+ my @prtconf = `/usr/sbin/prtconf | grep cpu..instance`;
+ if($#prtconf >= 0) {
+ return $#prtconf +1;
+ }
+ }
+ return undef;
+}
+
+sub no_of_cores_solaris {
+ # Returns:
+ # Number of CPU cores on Solaris
+ if(-x "/usr/sbin/psrinfo") {
+ my @psrinfo = `/usr/sbin/psrinfo`;
+ if($#psrinfo >= 0) {
+ return $#psrinfo +1;
+ }
+ }
+ if(-x "/usr/sbin/prtconf") {
+ my @prtconf = `/usr/sbin/prtconf | grep cpu..instance`;
+ if($#prtconf >= 0) {
+ return $#prtconf +1;
+ }
+ }
+ return undef;
+}
+
+#
+# General useful library functions
+#
+
+sub min {
+ # Returns:
+ # Minimum value of array
+ my $min;
+ for (@_) {
+ # Skip undefs
+ defined $_ or next;
+        defined $min or do { $min = $_; next; }; # Set $min to the first non-undef value
+ $min = ($min < $_) ? $min : $_;
+ }
+ return $min;
+}
+
+sub max {
+ # Returns:
+ # Maximum value of array
+ my $max;
+ for (@_) {
+ # Skip undefs
+ defined $_ or next;
+        defined $max or do { $max = $_; next; }; # Set $max to the first non-undef value
+ $max = ($max > $_) ? $max : $_;
+ }
+ return $max;
+}
+
+sub sum {
+ # Returns:
+ # Sum of values of array
+ my @args = @_;
+ my $sum = 0;
+ for (@args) {
+ # Skip undefs
+ $_ and do { $sum += $_; }
+ }
+ return $sum;
+}
+
+sub undef_as_zero {
+ my $a = shift;
+ return $a ? $a : 0;
+}
+
+sub hostname {
+ if(not $Private::hostname) {
+ my $hostname = `hostname`;
+ chomp($hostname);
+ $Private::hostname = $hostname || "nohostname";
+ }
+ return $Private::hostname;
+}
+
+sub __RUNNING_AND_PRINTING_THE_JOBS__ {}
+
+# Variable structure:
+#
+# $Global::failed{$clean_command}{'count'}{$sshlogin} = number of times failed on this sshlogin
+# $Global::failed{$clean_command}{'seq'} = original sequence number
+# $Global::running{$pid}{'seq'} = printsequence
+# $Global::running{$pid}{sshlogin} = server to run on
+# $Global::running{$pid}{'exitstatus'} = exit status
+# $Global::running{$pid}{'out'} = stdout filehandle
+# $Global::running{$pid}{'err'} = stderr filehandle
+# $Global::running{$pid}{'command'} = command being run (including rsync/ssh and args)
+# $Global::running{$pid}{'cleancommand'} = command being run (excluding rsync/ssh but including args)
+# $Global::host{$sshlogin}{'no_of_running'} = number of currently running jobs
+# $Global::host{$sshlogin}{'completed'} = number of completed jobs
+# $Global::host{$sshlogin}{'ncpus'} = number of CPUs (or CPU cores)
+# $Global::host{$sshlogin}{'maxlength'} = max line length (currently buggy for remote)
+# $Global::host{$sshlogin}{'max_no_of_running'} = max parallel running jobs
+# $Global::host{$sshlogin}{'sshcmd'} = command to use as ssh
+# $Global::host{$sshlogin}{'serverlogin'} = user@host
+# $Global::total_running = total number of running jobs
+# $Global::total_started = total jobs started
+# $Global::total_jobs = total number of jobs to be started over the whole run
+# $Global::total_completed = total jobs completed
+# @Global::unget_arg = arguments, quoted as needed and ready to use
+# @Global::unget_lines = raw argument lines - still need quoting and splitting
+#
+# Flow:
+# Get_line: Line is read from file or stdin. Delimiter is chopped
+# Get_line_argv: Line is read from ARGV - no delimiter
+# Get column: Multiple -a or --colsep
+# Get column: @ARGV
+# Quote column:
+# get_quoted_args
+
+sub init_run_jobs {
+    # Remember the original STDOUT, STDERR and STDIN
+ # Returns: N/A
+ open $Global::original_stdout, ">&STDOUT" or die "Can't dup STDOUT: $!";
+ open $Global::original_stderr, ">&STDERR" or die "Can't dup STDERR: $!";
+ open $Global::original_stdin, "<&STDIN" or die "Can't dup STDIN: $!";
+ $Global::total_running = 0;
+ $Global::total_started = 0;
+ $Global::total_completed = 0;
+ $Global::tty_taken = 0;
+ $SIG{USR1} = \&list_running_jobs;
+ $SIG{USR2} = \&toggle_progress;
+ $Global::original_sigterm = $SIG{TERM};
+ $SIG{TERM} = \&start_no_new_jobs;
+ if(@::opt_basefile) {
+ setup_basefile();
+ }
+}
+
+sub login_and_host {
+    # Returns:
+    #   the last whitespace-separated word of $sshlogin
+ my $sshlogin = shift;
+ $sshlogin =~ /(\S+$)/ or die;
+ return $1;
+}
+
+sub drain_job_queue {
+ # Returns: N/A
+ if($::opt_progress) {
+ do_not_reap();
+ print init_progress();
+ reap_if_needed();
+ }
+ my $last_header="";
+ while($Global::total_running > 0) {
+ debug("jobs running: ",$Global::total_running," Memory usage:".my_memory_usage()."\n");
+ sleep 1;
+ reaper(); # Some systems fail to catch the SIGCHLD
+ if($::opt_progress) {
+ my %progress = progress();
+ do_not_reap();
+ if($last_header ne $progress{'header'}) {
+ print "\n",$progress{'header'},"\n";
+ $last_header = $progress{'header'};
+ }
+ print "\r",$progress{'status'};
+ reap_if_needed();
+ }
+ }
+ if($::opt_progress) {
+ print "\n";
+ }
+}
+
+sub toggle_progress {
+ # Turn on/off progress view
+ # Returns: N/A
+ $::opt_progress = not $::opt_progress;
+ if($::opt_progress) {
+ print init_progress();
+ }
+}
+
+sub init_progress {
+ # Returns:
+ # list of computers for progress output
+ $|=1;
+ my %progress = progress();
+ return ("\nComputers / CPU cores / Max jobs to run\n",
+ $progress{'workerlist'},"\n");
+}
+
+sub progress {
+ # Returns:
+ # list of workers
+ # header that will fit on the screen
+ # status message that will fit on the screen
+ my $termcols = terminal_columns();
+ my ($status, $header)=("x"x($termcols+1),"");
+ my @workers = sort keys %Global::host;
+ my %sshlogin = map { $_ eq ":" ? ($_=>"local") : ($_=>$_) } @workers;
+ my $workerno = 1;
+ my %workerno = map { ($_=>$workerno++) } @workers;
+ my $workerlist = join("\n", map {
+ $workerno{$_}.":".$sshlogin{$_} ." / ".
+ ($Global::host{$_}{'ncpus'} || "-") ." / ".
+ $Global::host{$_}{'max_no_of_running'}
+ } @workers);
+ my $eta = "";
+ if($::opt_eta) {
+ my $completed = 0;
+ for(@workers) { $completed += ($Global::host{$_}{'completed'}||0) }
+ if($completed) {
+ $Private::first_completed ||= time;
+ my $avgtime = (time-$Private::first_completed)/$completed;
+ my $this_eta = ($Global::total_jobs - $completed) * $avgtime;
+ $Private::eta ||= $this_eta;
+ # Smooth the eta so it does not jump wildly
+ $Private::eta = 0.9 * $Private::eta + 0.1 * $this_eta;
+ $eta = sprintf("ETA: %ds ", $Private::eta);
+ }
+ }
+
+ if(length $status > $termcols) {
+ # sshlogin1:XX/XX/XX%/XX.Xs sshlogin2:XX/XX/XX%/XX.Xs sshlogin3:XX/XX/XX%/XX.Xs
+ $header = "Computer:jobs running/jobs completed/%of started jobs/Average seconds to complete";
+ $status = $eta .
+ join(" ",map
+ {
+ if($Global::total_started) {
+ my $completed = ($Global::host{$_}{'completed'}||0);
+ my $running = $Global::host{$_}{'no_of_running'};
+ my $time = $completed ? (time-$^T)/($completed) : "0";
+ sprintf("%s:%d/%d/%d%%/%.1fs ",
+ $sshlogin{$_}, $running, $completed,
+ ($running+$completed)*100
+ / $Global::total_started, $time);
+ }
+ } @workers);
+ }
+ if(length $status > $termcols) {
+ # 1:XX/XX/XX%/XX.Xs 2:XX/XX/XX%/XX.Xs 3:XX/XX/XX%/XX.Xs 4:XX/XX/XX%/XX.Xs
+ $header = "Computer:jobs running/jobs completed/%of started jobs";
+ $status = $eta .
+ join(" ",map
+ {
+ my $completed = ($Global::host{$_}{'completed'}||0);
+ my $running = $Global::host{$_}{'no_of_running'};
+ my $time = $completed ? (time-$^T)/($completed) : "0";
+ sprintf("%s:%d/%d/%d%%/%.1fs ",
+ $workerno{$_}, $running, $completed,
+ ($running+$completed)*100
+ / $Global::total_started, $time);
+ } @workers);
+ }
+ if(length $status > $termcols) {
+ # sshlogin1:XX/XX/XX% sshlogin2:XX/XX/XX% sshlogin3:XX/XX/XX%
+ $header = "Computer:jobs running/jobs completed/%of started jobs";
+ $status = $eta .
+ join(" ",map
+ { sprintf("%s:%d/%d/%d%%",
+ $sshlogin{$_}, $Global::host{$_}{'no_of_running'},
+ ($Global::host{$_}{'completed'}||0),
+ ($Global::host{$_}{'no_of_running'}+
+ ($Global::host{$_}{'completed'}||0))*100
+ / $Global::total_started) }
+ @workers);
+ }
+ if(length $status > $termcols) {
+ # 1:XX/XX/XX% 2:XX/XX/XX% 3:XX/XX/XX% 4:XX/XX/XX% 5:XX/XX/XX% 6:XX/XX/XX%
+ $header = "Computer:jobs running/jobs completed/%of started jobs";
+ $status = $eta .
+ join(" ",map
+ { sprintf("%s:%d/%d/%d%%",
+ $workerno{$_}, $Global::host{$_}{'no_of_running'},
+ ($Global::host{$_}{'completed'}||0),
+ ($Global::host{$_}{'no_of_running'}+
+ ($Global::host{$_}{'completed'}||0))*100
+ / $Global::total_started) }
+ @workers);
+ }
+ if(length $status > $termcols) {
+ # sshlogin1:XX/XX/XX% sshlogin2:XX/XX/XX% sshlogin3:XX/XX sshlogin4:XX/XX
+ $header = "Computer:jobs running/jobs completed";
+ $status = $eta .
+ join(" ",map
+ { sprintf("%s:%d/%d",
+ $sshlogin{$_}, $Global::host{$_}{'no_of_running'},
+ ($Global::host{$_}{'completed'}||0)) }
+ @workers);
+ }
+ if(length $status > $termcols) {
+ # sshlogin1:XX/XX sshlogin2:XX/XX sshlogin3:XX/XX sshlogin4:XX/XX
+ $header = "Computer:jobs running/jobs completed";
+ $status = $eta .
+ join(" ",map
+ { sprintf("%s:%d/%d",
+ $sshlogin{$_}, $Global::host{$_}{'no_of_running'},
+ ($Global::host{$_}{'completed'}||0)) }
+ @workers);
+ }
+ if(length $status > $termcols) {
+ # 1:XX/XX 2:XX/XX 3:XX/XX 4:XX/XX 5:XX/XX 6:XX/XX
+ $header = "Computer:jobs running/jobs completed";
+ $status = $eta .
+ join(" ",map
+ { sprintf("%s:%d/%d",
+ $workerno{$_}, $Global::host{$_}{'no_of_running'},
+ ($Global::host{$_}{'completed'}||0)) }
+ @workers);
+ }
+ if(length $status > $termcols) {
+ # sshlogin1:XX sshlogin2:XX sshlogin3:XX sshlogin4:XX sshlogin5:XX
+ $header = "Computer:jobs completed";
+ $status = $eta .
+ join(" ",map
+ { sprintf("%s:%d",
+ $sshlogin{$_},
+ ($Global::host{$_}{'completed'}||0)) }
+ @workers);
+ }
+ if(length $status > $termcols) {
+ # 1:XX 2:XX 3:XX 4:XX 5:XX 6:XX
+ $header = "Computer:jobs completed";
+ $status = $eta .
+ join(" ",map
+ { sprintf("%s:%d",
+ $workerno{$_},
+ ($Global::host{$_}{'completed'}||0)) }
+ @workers);
+ }
+ return ("workerlist" => $workerlist, "header" => $header, "status" => $status);
+}
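+
+# Illustrative note: the ETA computed above is smoothed exponentially,
+#   eta = 0.9 * previous_eta + 0.1 * current_estimate
+# so, e.g., a previous ETA of 100s and a fresh estimate of 80s display as
+# 0.9*100 + 0.1*80 = 98s rather than jumping straight to 80s.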
+
+sub terminal_columns {
+ # Get the number of columns of the display
+ # Returns:
+ # number of columns of the screen
+ if(not $Private::columns) {
+ $Private::columns = $ENV{'COLUMNS'};
+ if(not $Private::columns) {
+ my $resize = qx{ resize 2>/dev/null };
+ $resize =~ /COLUMNS=(\d+);/ and do { $Private::columns = $1; };
+ }
+ $Private::columns ||= 80;
+ }
+ return $Private::columns;
+}
+
+sub start_more_jobs {
+ # Returns:
+ # number of jobs started
+ my $jobs_started = 0;
+ if(not $Global::start_no_new_jobs) {
+ if($Global::max_procs_file) {
+ my $mtime = (stat($Global::max_procs_file))[9];
+ if($mtime > $Global::max_procs_file_last_mod) {
+ $Global::max_procs_file_last_mod = $mtime;
+ compute_number_of_processes_for_sshlogins();
+ }
+ }
+ for my $sshlogin (keys %Global::host) {
+ debug("Running jobs on $sshlogin: $Global::host{$sshlogin}{'no_of_running'}\n");
+ while ($Global::host{$sshlogin}{'no_of_running'} <
+ $Global::host{$sshlogin}{'max_no_of_running'}) {
+ if(start_another_job($sshlogin) == 0) {
+ # No more jobs to start
+ last;
+ }
+ $Global::host{$sshlogin}{'no_of_running'}++;
+ $jobs_started++;
+ }
+ debug("Running jobs on $sshlogin: $Global::host{$sshlogin}{'no_of_running'}\n");
+ }
+ }
+ return $jobs_started;
+}
+
+sub start_another_job {
+ # Grab a job from @Global::command, start it at sshlogin
+ # and remember the pid, the STDOUT and the STDERR handles
+ # Returns:
+    #   1 if another job was started
+ # 0 otherwise
+ my $sshlogin = shift;
+ # Do we have enough file handles to start another job?
+ if(enough_file_handles()) {
+ my ($command,$clean_command) = get_command_line_with_sshlogin($sshlogin);
+ if(defined $command) {
+ debug("Command to run on '$sshlogin': $command\n");
+ my %jobinfo = start_job($command,$sshlogin,$clean_command);
+ if(%jobinfo) {
+ $Global::running{$jobinfo{"pid"}} = \%jobinfo;
+ debug("Started as seq ".$jobinfo{'seq'},"\n");
+ return 1;
+ } else {