diff --git a/export.el b/export.el new file mode 100644 index 0000000..8e60c24 --- /dev/null +++ b/export.el @@ -0,0 +1,9 @@ +;; In elisp, default-directory is the current directory +(add-to-list 'load-path default-directory) +;; If htmlize is outdated, just replace htmlize.el with the newer version lmao. +(require 'htmlize) + +(load-theme 'tsdh-light) + +(find-file "main.org") +(org-html-export-to-html) diff --git a/htmlize.el b/htmlize.el new file mode 100644 index 0000000..b158a65 --- /dev/null +++ b/htmlize.el @@ -0,0 +1,1864 @@ +;;; htmlize.el --- Convert buffer text and decorations to HTML. -*- lexical-binding: t -*- + +;; Copyright (C) 1997-2003,2005,2006,2009,2011,2012,2014,2017,2018,2020 Hrvoje Niksic + +;; Author: Hrvoje Niksic +;; Homepage: https://github.com/hniksic/emacs-htmlize +;; Keywords: hypermedia, extensions +;; Version: 1.57 + +;; This program is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 2, or (at your option) +;; any later version. + +;; This program is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. + +;; You should have received a copy of the GNU General Public License +;; along with this program; see the file COPYING. If not, write to the +;; Free Software Foundation, Inc., 59 Temple Place - Suite 330, +;; Boston, MA 02111-1307, USA. + +;;; Commentary: + +;; This package converts the buffer text and the associated +;; decorations to HTML. Mail to to discuss +;; features and additions. All suggestions are more than welcome. + +;; To use it, just switch to the buffer you want HTML-ized and type +;; `M-x htmlize-buffer'. You will be switched to a new buffer that +;; contains the resulting HTML code. You can edit and inspect this +;; buffer, or you can just save it with C-x C-w. `M-x htmlize-file' +;; will find a file, fontify it, and save the HTML version in +;; FILE.html, without any additional intervention. `M-x +;; htmlize-many-files' allows you to htmlize any number of files in +;; the same manner. `M-x htmlize-many-files-dired' does the same for +;; files marked in a dired buffer. + +;; htmlize supports three types of HTML output, selected by setting +;; `htmlize-output-type': `css', `inline-css', and `font'. In `css' +;; mode, htmlize uses cascading style sheets to specify colors; it +;; generates classes that correspond to Emacs faces and uses ... to color parts of text. In this mode, the +;; produced HTML is valid under the 4.01 strict DTD, as confirmed by +;; the W3C validator. `inline-css' is like `css', except the CSS is +;; put directly in the STYLE attribute of the SPAN element, making it +;; possible to paste the generated HTML into existing HTML documents. +;; In `font' mode, htmlize uses ... to +;; colorize HTML, which is not standard-compliant, but works better in +;; older browsers. `css' mode is the default. + +;; You can also use htmlize from your Emacs Lisp code. When called +;; non-interactively, `htmlize-buffer' and `htmlize-region' will +;; return the resulting HTML buffer, but will not change current +;; buffer or move the point. htmlize will do its best to work on +;; non-windowing Emacs sessions but the result will be limited to +;; colors supported by the terminal. + +;; htmlize aims for compatibility with older Emacs versions. 
Please +;; let me know if it doesn't work on the version of GNU Emacs that you +;; are using. The package relies on the presence of CL extensions; +;; please don't try to remove that dependency. I see no practical +;; problems with using the full power of the CL extensions, except +;; that one might learn to like them too much. + +;; The latest version is available at: +;; +;; +;; +;; + +;; Thanks go to the many people who have sent reports and contributed +;; comments, suggestions, and fixes. They include Ron Gut, Bob +;; Weiner, Toni Drabik, Peter Breton, Ville Skytta, Thomas Vogels, +;; Juri Linkov, Maciek Pasternacki, and many others. + +;; User quotes: "You sir, are a sick, sick, _sick_ person. :)" +;; -- Bill Perry, author of Emacs/W3 + + +;;; Code: + +(require 'cl-lib) +(eval-when-compile + (defvar font-lock-auto-fontify) + (defvar font-lock-support-mode) + (defvar global-font-lock-mode)) + +(defconst htmlize-version "1.57") + +(defgroup htmlize nil + "Convert buffer text and faces to HTML." + :group 'hypermedia) + +(defcustom htmlize-head-tags "" + "Additional tags to insert within HEAD of the generated document." + :type 'string + :group 'htmlize) + +(defcustom htmlize-output-type 'css + "Output type of generated HTML, one of `css', `inline-css', or `font'. +When set to `css' (the default), htmlize will generate a style sheet +with description of faces, and use it in the HTML document, specifying +the faces in the actual text with . + +When set to `inline-css', the style will be generated as above, but +placed directly in the STYLE attribute of the span ELEMENT: . This makes it easier to paste the resulting HTML to +other documents. + +When set to `font', the properties will be set using layout tags +, , , , and . + +`css' output is normally preferred, but `font' is still useful for +supporting old, pre-CSS browsers, and both `inline-css' and `font' for +easier embedding of colorized text in foreign HTML documents (no style +sheet to carry around)." + :type '(choice (const css) (const inline-css) (const font)) + :group 'htmlize) + +(defcustom htmlize-use-images t + "Whether htmlize generates `img' for images attached to buffer contents." + :type 'boolean + :group 'htmlize) + +(defcustom htmlize-force-inline-images nil + "Non-nil means generate all images inline using data URLs. +Normally htmlize converts image descriptors with :file properties to +relative URIs, and those with :data properties to data URIs. With this +flag set, the images specified as a file name are loaded into memory and +embedded in the HTML as data URIs." + :type 'boolean + :group 'htmlize) + +(defcustom htmlize-max-alt-text 100 + "Maximum size of text to use as ALT text in images. + +Normally when htmlize encounters text covered by the `display' property +that specifies an image, it generates an `alt' attribute containing the +original text. If the text is larger than `htmlize-max-alt-text' characters, +this will not be done." + :type 'integer + :group 'htmlize) + +(defcustom htmlize-transform-image 'htmlize-default-transform-image + "Function called to modify the image descriptor. + +The function is called with the image descriptor found in the buffer and +the text the image is supposed to replace. It should return a (possibly +different) image descriptor property list or a replacement string to use +instead of of the original buffer text. + +Returning nil is the same as returning the original text." 
+ :type 'boolean + :group 'htmlize) + +(defcustom htmlize-generate-hyperlinks t + "Non-nil means auto-generate the links from URLs and mail addresses in buffer. + +This is on by default; set it to nil if you don't want htmlize to +autogenerate such links. Note that this option only turns off automatic +search for contents that looks like URLs and converting them to links. +It has no effect on whether htmlize respects the `htmlize-link' property." + :type 'boolean + :group 'htmlize) + +(defcustom htmlize-hyperlink-style " + a { + color: inherit; + background-color: inherit; + font: inherit; + text-decoration: inherit; + } + a:hover { + text-decoration: underline; + } +" + "The CSS style used for hyperlinks when in CSS mode." + :type 'string + :group 'htmlize) + +(defcustom htmlize-replace-form-feeds t + "Non-nil means replace form feeds in source code with HTML separators. +Form feeds are the ^L characters at line beginnings that are sometimes +used to separate sections of source code. If this variable is set to +`t', form feed characters are replaced with the
<hr /> separator.  If this
+is a string, it specifies the replacement to use.  Note that <pre> is
+temporarily closed before the separator is inserted, so the default
+replacement is effectively \"</pre><hr /><pre>\".  If you specify
+another replacement, don't forget to close and reopen the <pre> if you
+want the output to remain valid HTML.
+
+If you need more elaborate processing, set this to nil and use
+htmlize-after-hook."
+  :type 'boolean
+  :group 'htmlize)
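+;; Editor's illustration (not part of upstream htmlize): a string
+;; replacement must close and reopen <pre> itself to keep the output
+;; valid HTML, e.g.:
+;;   (setq htmlize-replace-form-feeds "</pre><hr class=\"page-break\" /><pre>")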
+
+(defcustom htmlize-html-charset nil
+  "The charset declared by the resulting HTML documents.
+When non-nil, causes htmlize to insert the following in the HEAD section
+of the generated HTML:
+
+  <meta http-equiv=\"Content-Type\" content=\"text/html; charset=CHARSET\">
+
+where CHARSET is the value you've set for htmlize-html-charset.  Valid
+charsets are defined by MIME and include strings like \"iso-8859-1\",
+\"iso-8859-15\", \"utf-8\", etc.
+
+If you are using non-Latin-1 charsets, you might need to set this for
+your documents to render correctly.  Also, the W3C validator requires
+submitted HTML documents to declare a charset.  So if you care about
+validation, you can use this to prevent the validator from bitching.
+
+Needless to say, if you set this, you should actually make sure that
+the buffer is in the encoding you're claiming it is in.  (This is
+normally achieved by using the correct file coding system for the
+buffer.)  If you don't understand what that means, you should probably
+leave this option in its default setting."
+  :type '(choice (const :tag "Unset" nil)
+		 string)
+  :group 'htmlize)
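+;; Editor's illustration (not part of upstream htmlize): declare UTF-8
+;; for buffers that are saved with a UTF-8 coding system:
+;;   (setq htmlize-html-charset "utf-8")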
+
+(defcustom htmlize-convert-nonascii-to-entities t
+  "Whether non-ASCII characters should be converted to HTML entities.
+
+When this is non-nil, characters with codes in the 128-255 range will be
+considered Latin 1 and rewritten as \"&#CODE;\".  Characters with codes
+above 255 will be converted to \"&#UCS;\", where UCS denotes the Unicode
+code point of the character.  If the code point cannot be determined,
+the character will be copied unchanged, as would be the case if the
+option were nil.
+
+When the option is nil, the non-ASCII characters are copied to HTML
+without modification.  In that case, the web server and/or the browser
+must be set to understand the encoding that was used when saving the
+buffer.  (You might also want to specify it by setting
+`htmlize-html-charset'.)
+
+Note that in an HTML entity \"&#CODE;\", CODE is always a UCS code point,
+which has nothing to do with the charset the page is in.  For example,
+\"©\" *always* refers to the copyright symbol, regardless of charset
+specified by the META tag or the charset sent by the HTTP server.  In
+other words, \"©\" is exactly equivalent to \"©\".
+
+For most people htmlize will work fine with this option left at the
+default setting; don't change it unless you know what you're doing."
+  :type 'sexp
+  :group 'htmlize)
+
+(defcustom htmlize-ignore-face-size 'absolute
+  "Whether face size should be ignored when generating HTML.
+If this is nil, face sizes are used.  If set to t, sizes are ignored.
+If set to `absolute', only absolute size specifications are ignored.
+Please note that font sizes only work with CSS-based output types."
+  :type '(choice (const :tag "Don't ignore" nil)
+		 (const :tag "Ignore all" t)
+		 (const :tag "Ignore absolute" absolute))
+  :group 'htmlize)
+
+(defcustom htmlize-css-name-prefix ""
+  "The prefix used for CSS names.
+The CSS names that htmlize generates from face names are often too
+generic for CSS files; for example, `font-lock-type-face' is transformed
+to `type'.  Use this variable to add a prefix to the generated names.
+The string \"htmlize-\" is an example of a reasonable prefix."
+  :type 'string
+  :group 'htmlize)
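+;; Editor's illustration (not part of upstream htmlize): with
+;;   (setq htmlize-css-name-prefix "htmlize-")
+;; the class generated for `font-lock-type-face' becomes "htmlize-type"
+;; instead of the overly generic "type".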
+
+(defcustom htmlize-use-rgb-txt t
+  "Whether `rgb.txt' should be used to convert color names to RGB.
+
+This conversion means determining, for instance, that the color
+\"IndianRed\" corresponds to the (205, 92, 92) RGB triple.  `rgb.txt'
+is the X color database that maps hundreds of color names to such RGB
+triples.  When this variable is non-nil, `htmlize' uses `rgb.txt' to
+look up color names.
+
+If this variable is nil, htmlize queries Emacs for RGB components of
+colors using `color-instance-rgb-components' and `color-values'.
+This can yield incorrect results on non-true-color displays.
+
+If the `rgb.txt' file is not found (which will be the case if you're
+running Emacs on non-X11 systems), this option is ignored."
+  :type 'boolean
+  :group 'htmlize)
+
+(defvar htmlize-face-overrides nil
+  "Overrides for face definitions.
+
+Normally face definitions are taken from Emacs settings for fonts
+in the current frame.  For faces present in this plist, the
+definitions will be used instead.  Keys in the plist are symbols
+naming the face and values are the overriding definitions.  For
+example:
+
+  (setq htmlize-face-overrides
+        '(font-lock-warning-face \"black\"
+          font-lock-function-name-face \"red\"
+          font-lock-comment-face \"blue\"
+          default (:foreground \"dark-green\" :background \"yellow\")))
+
+This variable can be also be `let' bound when running `htmlize-buffer'.")
+
+(defcustom htmlize-untabify t
+  "Non-nil means untabify buffer contents during htmlization."
+  :type 'boolean
+  :group 'htmlize)
+
+(defcustom htmlize-html-major-mode nil
+  "The mode the newly created HTML buffer will be put in.
+Set this to nil if you prefer the default (fundamental) mode."
+  :type '(radio (const :tag "No mode (fundamental)" nil)
+		 (function-item html-mode)
+		 (function :tag "User-defined major mode"))
+  :group 'htmlize)
+
+(defcustom htmlize-pre-style nil
+  "When non-nil, `
' tags will be decorated with style
+information in `font' and `inline-css' modes. This allows a
+consistent background for captures of regions."
+  :type 'boolean
+  :group 'htmlize)
+
+(defvar htmlize-before-hook nil
+  "Hook run before htmlizing a buffer.
+The hook functions are run in the source buffer (not the resulting HTML
+buffer).")
+
+(defvar htmlize-after-hook nil
+  "Hook run after htmlizing a buffer.
+Unlike `htmlize-before-hook', these functions are run in the generated
+HTML buffer.  You may use them to modify the outlook of the final HTML
+output.")
+
+(defvar htmlize-file-hook nil
+  "Hook run by `htmlize-file' after htmlizing a file, but before saving it.")
+
+(defvar htmlize-buffer-places)
+
+;;; Some cross-Emacs compatibility.
+
+;; We need a function that efficiently finds the next change of a
+;; property regardless of whether the change occurred because of a
+;; text property or an extent/overlay.
+(defun htmlize-next-change (pos prop &optional limit)
+  (if prop
+      (next-single-char-property-change pos prop nil limit)
+    (next-char-property-change pos limit)))
+
+(defun htmlize-overlay-faces-at (pos)
+  (delq nil (mapcar (lambda (o) (overlay-get o 'face)) (overlays-at pos))))
+
+(defun htmlize-next-face-change (pos &optional limit)
+  ;; (htmlize-next-change pos 'face limit) would skip over entire
+  ;; overlays that specify the `face' property, even when they
+  ;; contain smaller text properties that also specify `face'.
+  ;; Emacs display engine merges those faces, and so must we.
+  (or limit
+      (setq limit (point-max)))
+  (let ((next-prop (next-single-property-change pos 'face nil limit))
+        (overlay-faces (htmlize-overlay-faces-at pos)))
+    (while (progn
+             (setq pos (next-overlay-change pos))
+             (and (< pos next-prop)
+                  (equal overlay-faces (htmlize-overlay-faces-at pos)))))
+    (setq pos (min pos next-prop))
+    ;; Additionally, we include the entire region that specifies the
+    ;; `display' property.
+    (when (get-char-property pos 'display)
+      (setq pos (next-single-char-property-change pos 'display nil limit)))
+    pos))
+
+(defmacro htmlize-lexlet (&rest letforms)
+  (declare (indent 1) (debug let))
+  (if (and (boundp 'lexical-binding)
+           lexical-binding)
+      `(let ,@letforms)
+    ;; cl extensions have a macro implementing lexical let
+    `(lexical-let ,@letforms)))
+
+
+;;; Transformation of buffer text: HTML escapes, untabification, etc.
+
+(defvar htmlize-basic-character-table
+  ;; Map characters in the 0-127 range to either one-character strings
+  ;; or to numeric entities.
+  (let ((table (make-vector 128 ?\0)))
+    ;; Map characters in the 32-126 range to themselves, others to
+    ;; &#CODE entities;
+    (dotimes (i 128)
+      (setf (aref table i) (if (and (>= i 32) (<= i 126))
+			       (char-to-string i)
+			     (format "&#%d;" i))))
+    ;; Set exceptions manually.
+    (setf
+     ;; Don't escape newline, carriage return, and TAB.
+     (aref table ?\n) "\n"
+     (aref table ?\r) "\r"
+     (aref table ?\t) "\t"
+     ;; Escape &, <, and >.
+     (aref table ?&) "&amp;"
+     (aref table ?<) "&lt;"
+     (aref table ?>) "&gt;"
+     ;; Not escaping '"' buys us a measurable speedup.  It's only
+     ;; necessary to quote it for strings used in attribute values,
+     ;; which htmlize doesn't typically do.
+     ;(aref table ?\") "&quot;"
+     )
+    table))
+
+;; A cache of HTML representation of non-ASCII characters.  Depending
+;; on the setting of `htmlize-convert-nonascii-to-entities', this maps
+;; non-ASCII characters to either "&#<code>;" or "<char>" (mapconcat's
+;; mapper must always return strings).  It's only filled as characters
+;; are encountered, so that in a buffer with e.g. French text, it will
+;; only ever contain French accented characters as keys.  It's cleared
+;; on each entry to htmlize-buffer-1 to allow modifications of
+;; `htmlize-convert-nonascii-to-entities' to take effect.
+(defvar htmlize-extended-character-cache (make-hash-table :test 'eq))
+
+(defun htmlize-protect-string (string)
+  "HTML-protect string, escaping HTML metacharacters and I18N chars."
+  ;; Only protecting strings that actually contain unsafe or non-ASCII
+  ;; chars removes a lot of unnecessary funcalls and consing.
+  (if (not (string-match "[^\r\n\t -%'-;=?-~]" string))
+      string
+    (mapconcat (lambda (char)
+		 (cond
+		  ((< char 128)
+		   ;; ASCII: use htmlize-basic-character-table.
+		   (aref htmlize-basic-character-table char))
+		  ((gethash char htmlize-extended-character-cache)
+		   ;; We've already seen this char; return the cached
+		   ;; string.
+		   )
+		  ((not htmlize-convert-nonascii-to-entities)
+		   ;; If conversion to entities is not desired, always
+		   ;; copy the char literally.
+		   (setf (gethash char htmlize-extended-character-cache)
+			 (char-to-string char)))
+		  ((< char 256)
+		   ;; Latin 1: no need to call encode-char.
+		   (setf (gethash char htmlize-extended-character-cache)
+			 (format "&#%d;" char)))
+		  ((encode-char char 'ucs)
+                   ;; Must check if encode-char works for CHAR;
+                   ;; it fails for Arabic and possibly elsewhere.
+		   (setf (gethash char htmlize-extended-character-cache)
+			 (format "&#%d;" (encode-char char 'ucs))))
+		  (t
+		   ;; encode-char doesn't work for this char.  Copy it
+		   ;; unchanged and hope for the best.
+		   (setf (gethash char htmlize-extended-character-cache)
+			 (char-to-string char)))))
+	       string "")))
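+;; Editor's illustration (not part of upstream htmlize), assuming the
+;; entity table above:
+;;   (htmlize-protect-string "if (a < b && c > 0)")
+;;   ;; => "if (a &lt; b &amp;&amp; c &gt; 0)"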
+
+(defun htmlize-attr-escape (string)
+  ;; Like htmlize-protect-string, but also escapes double-quoted
+  ;; strings to make it usable in attribute values.
+  (setq string (htmlize-protect-string string))
+  (if (not (string-match "\"" string))
+      string
+    (mapconcat (lambda (char)
+                 (if (eql char ?\")
+                     "&quot;"
+                   (char-to-string char)))
+               string "")))
+
+(defsubst htmlize-concat (list)
+  (if (and (consp list) (null (cdr list)))
+      ;; Don't create a new string in the common case where the list only
+      ;; consists of one element.
+      (car list)
+    (apply #'concat list)))
+
+(defun htmlize-format-link (linkprops text)
+  (let ((uri (if (stringp linkprops)
+                 linkprops
+               (plist-get linkprops :uri)))
+        (escaped-text (htmlize-protect-string text)))
+    (if uri
+        (format "<a href=\"%s\">%s</a>" (htmlize-attr-escape uri) escaped-text)
+      escaped-text)))
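+;; Editor's illustration (not part of upstream htmlize; hypothetical URI):
+;;   (htmlize-format-link "https://example.com" "example")
+;;   ;; => "<a href=\"https://example.com\">example</a>"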
+
+(defun htmlize-escape-or-link (string)
+  ;; Escape STRING and/or add hyperlinks.  STRING comes from a
+  ;; `display' property.
+  (let ((pos 0) (end (length string)) outlist)
+    (while (< pos end)
+      (let* ((link (get-char-property pos 'htmlize-link string))
+             (next-link-change (next-single-property-change
+                                pos 'htmlize-link string end))
+             (chunk (substring string pos next-link-change)))
+        (push
+         (cond (link
+                (htmlize-format-link link chunk))
+               ((get-char-property 0 'htmlize-literal chunk)
+                chunk)
+               (t
+                (htmlize-protect-string chunk)))
+         outlist)
+        (setq pos next-link-change)))
+    (htmlize-concat (nreverse outlist))))
+
+(defun htmlize-display-prop-to-html (display text)
+  (let (desc)
+    (cond ((stringp display)
+           ;; Emacs ignores recursive display properties.
+           (htmlize-escape-or-link display))
+          ((not (eq (car-safe display) 'image))
+           (htmlize-protect-string text))
+          ((null (setq desc (funcall htmlize-transform-image
+                                     (cdr display) text)))
+           (htmlize-escape-or-link text))
+          ((stringp desc)
+           (htmlize-escape-or-link desc))
+          (t
+           (htmlize-generate-image desc text)))))
+
+(defun htmlize-string-to-html (string)
+  ;; Convert the string to HTML, including images attached as
+  ;; `display' property and links as `htmlize-link' property.  In a
+  ;; string without images or links, this is equivalent to
+  ;; `htmlize-protect-string'.
+  (let ((pos 0) (end (length string)) outlist)
+    (while (< pos end)
+      (let* ((display (get-char-property pos 'display string))
+             (next-display-change (next-single-property-change
+                                   pos 'display string end))
+             (chunk (substring string pos next-display-change)))
+        (push
+         (if display
+             (htmlize-display-prop-to-html display chunk)
+           (htmlize-escape-or-link chunk))
+         outlist)
+        (setq pos next-display-change)))
+    (htmlize-concat (nreverse outlist))))
+
+(defun htmlize-default-transform-image (imgprops _text)
+  "Default transformation of image descriptor to something usable in HTML.
+
+If `htmlize-use-images' is nil, the function always returns nil, meaning
+use original text.  Otherwise, it tries to find the image for images that
+specify a file name.  If `htmlize-force-inline-images' is non-nil, it also
+converts the :file attribute to :data and returns the modified property
+list."
+  (when htmlize-use-images
+    (when (plist-get imgprops :file)
+      (let ((location (plist-get (cdr (find-image (list imgprops))) :file)))
+        (when location
+          (setq imgprops (plist-put (cl-copy-list imgprops) :file location)))))
+    (if htmlize-force-inline-images
+        (let ((location (plist-get imgprops :file))
+              data)
+          (when location
+            (with-temp-buffer
+              (condition-case nil
+                  (progn
+                    (insert-file-contents-literally location)
+                    (setq data (buffer-string)))
+                (error nil))))
+          ;; if successful, return the new plist, otherwise return
+          ;; nil, which will use the original text
+          (and data
+               (plist-put (plist-put imgprops :file nil)
+                          :data data)))
+      imgprops)))
+
+(defun htmlize-alt-text (_imgprops origtext)
+  (and (/= (length origtext) 0)
+       (<= (length origtext) htmlize-max-alt-text)
+       (not (string-match "[\0-\x1f]" origtext))
+       origtext))
+
+(defun htmlize-generate-image (imgprops origtext)
+  (let* ((alt-text (htmlize-alt-text imgprops origtext))
+         (alt-attr (if alt-text
+                       (format " alt=\"%s\"" (htmlize-attr-escape alt-text))
+                     "")))
+    (cond ((plist-get imgprops :file)
+           ;; Try to find the image in image-load-path
+           (let* ((found-props (cdr (find-image (list imgprops))))
+                  (file (or (plist-get found-props :file)
+                            (plist-get imgprops :file))))
+             (format "<img src=\"%s\"%s />"
+                     (htmlize-attr-escape (file-relative-name file))
+                     alt-attr)))
+          ((plist-get imgprops :data)
+           (format "<img src=\"data:%s;base64,%s\"%s />"
+                   (or (plist-get imgprops :type) "")
+                   (base64-encode-string (plist-get imgprops :data))
+                   alt-attr)))))
+
+(defconst htmlize-ellipsis "...")
+(put-text-property 0 (length htmlize-ellipsis) 'htmlize-ellipsis t htmlize-ellipsis)
+
+(defun htmlize-match-inv-spec (inv)
+  (cl-member inv buffer-invisibility-spec
+             :key (lambda (i)
+                    (if (symbolp i) i (car i)))))
+
+(defun htmlize-decode-invisibility-spec (invisible)
+  ;; Return t, nil, or `ellipsis', depending on how invisible text should be inserted.
+
+  (if (not (listp buffer-invisibility-spec))
+      ;; If buffer-invisibility-spec is not a list, then all
+      ;; characters with non-nil `invisible' property are visible.
+      (not invisible)
+
+    ;; Otherwise, the value of a non-nil `invisible' property can be:
+    ;; 1. a symbol -- make the text invisible if it matches
+    ;;    buffer-invisibility-spec.
+    ;; 2. a list of symbols -- make the text invisible if
+    ;;    any symbol in the list matches
+    ;;    buffer-invisibility-spec.
+    ;; If the match of buffer-invisibility-spec has a non-nil
+    ;; CDR, replace the invisible text with an ellipsis.
+    (let ((match (if (symbolp invisible)
+                     (htmlize-match-inv-spec invisible)
+                   (cl-some #'htmlize-match-inv-spec invisible))))
+      (cond ((null match) t)
+            ((cdr-safe (car match)) 'ellipsis)
+            (t nil)))))
+
+(defun htmlize-add-before-after-strings (beg end text)
+  ;; Find overlays specifying before-string and after-string in [beg,
+  ;; pos).  If any are found, splice them into TEXT and return the new
+  ;; text.
+  (let (additions)
+    (dolist (overlay (overlays-in beg end))
+      (let ((before (overlay-get overlay 'before-string))
+            (after (overlay-get overlay 'after-string)))
+        (when after
+          (push (cons (- (overlay-end overlay) beg)
+                      after)
+                additions))
+        (when before
+          (push (cons (- (overlay-start overlay) beg)
+                      before)
+                additions))))
+    (if additions
+        (let ((textlist nil)
+              (strpos 0))
+          (dolist (add (cl-stable-sort additions #'< :key #'car))
+            (let ((addpos (car add))
+                  (addtext (cdr add)))
+              (push (substring text strpos addpos) textlist)
+              (push addtext textlist)
+              (setq strpos addpos)))
+          (push (substring text strpos) textlist)
+          (apply #'concat (nreverse textlist)))
+      text)))
+
+(defun htmlize-copy-prop (prop beg end string)
+  ;; Copy the specified property from the specified region of the
+  ;; buffer to the target string.  We cannot rely on Emacs to copy the
+  ;; property because we want to handle properties coming from both
+  ;; text properties and overlays.
+  (let ((pos beg))
+    (while (< pos end)
+      (let ((value (get-char-property pos prop))
+            (next-change (htmlize-next-change pos prop end)))
+        (when value
+          (put-text-property (- pos beg) (- next-change beg)
+                             prop value string))
+        (setq pos next-change)))))
+
+(defun htmlize-get-text-with-display (beg end)
+  ;; Like buffer-substring-no-properties, except it copies the
+  ;; `display' property from the buffer, if found.
+  (let ((text (buffer-substring-no-properties beg end)))
+    (htmlize-copy-prop 'display beg end text)
+    (htmlize-copy-prop 'htmlize-link beg end text)
+    (setq text (htmlize-add-before-after-strings beg end text))
+    text))
+
+(defun htmlize-buffer-substring-no-invisible (beg end)
+  ;; Like buffer-substring-no-properties, but don't copy invisible
+  ;; parts of the region.  Where buffer-substring-no-properties
+  ;; mandates an ellipsis to be shown, htmlize-ellipsis is inserted.
+  (let ((pos beg)
+	visible-list invisible show last-show next-change)
+    ;; Iterate over the changes in the `invisible' property and filter
+    ;; out the portions where it's non-nil, i.e. where the text is
+    ;; invisible.
+    (while (< pos end)
+      (setq invisible (get-char-property pos 'invisible)
+	    next-change (htmlize-next-change pos 'invisible end)
+            show (htmlize-decode-invisibility-spec invisible))
+      (cond ((eq show t)
+	     (push (htmlize-get-text-with-display pos next-change)
+                   visible-list))
+            ((and (eq show 'ellipsis)
+                  (not (eq last-show 'ellipsis))
+                  ;; Conflate successive ellipses.
+                  (push htmlize-ellipsis visible-list))))
+      (setq pos next-change last-show show))
+    (htmlize-concat (nreverse visible-list))))
+
+(defun htmlize-trim-ellipsis (text)
+  ;; Remove htmlize-ellipses ("...") from the beginning of TEXT if it
+  ;; starts with it.  It checks for the special property of the
+  ;; ellipsis so it doesn't work on ordinary text that begins with
+  ;; "...".
+  (if (get-text-property 0 'htmlize-ellipsis text)
+      (substring text (length htmlize-ellipsis))
+    text))
+
+(defconst htmlize-tab-spaces
+  ;; A table of strings with spaces.  (aref htmlize-tab-spaces 5) is
+  ;; like (make-string 5 ?\ ), except it doesn't cons.
+  (let ((v (make-vector 32 nil)))
+    (dotimes (i (length v))
+      (setf (aref v i) (make-string i ?\ )))
+    v))
+
+(defun htmlize-untabify-string (text start-column)
+  "Untabify TEXT, assuming it starts at START-COLUMN."
+  (let ((column start-column)
+	(last-match 0)
+	(chunk-start 0)
+	chunks match-pos tab-size)
+    (while (string-match "[\t\n]" text last-match)
+      (setq match-pos (match-beginning 0))
+      (cond ((eq (aref text match-pos) ?\t)
+	     ;; Encountered a tab: create a chunk of text followed by
+	     ;; the expanded tab.
+	     (push (substring text chunk-start match-pos) chunks)
+	     ;; Increase COLUMN by the length of the text we've
+	     ;; skipped since last tab or newline.  (Encountering
+	     ;; newline resets it.)
+	     (cl-incf column (- match-pos last-match))
+	     ;; Calculate tab size based on tab-width and COLUMN.
+	     (setq tab-size (- tab-width (% column tab-width)))
+	     ;; Expand the tab, carefully recreating the `display'
+	     ;; property if one was on the TAB.
+             (let ((display (get-text-property match-pos 'display text))
+                   (expanded-tab (aref htmlize-tab-spaces tab-size)))
+               (when display
+                 (put-text-property 0 tab-size 'display display expanded-tab))
+               (push expanded-tab chunks))
+	     (cl-incf column tab-size)
+	     (setq chunk-start (1+ match-pos)))
+	    (t
+	     ;; Reset COLUMN at beginning of line.
+	     (setq column 0)))
+      (setq last-match (1+ match-pos)))
+    ;; If no chunks have been allocated, it means there have been no
+    ;; tabs to expand.  Return TEXT unmodified.
+    (if (null chunks)
+	text
+      (when (< chunk-start (length text))
+	;; Push the remaining chunk.
+	(push (substring text chunk-start) chunks))
+      ;; Generate the output from the available chunks.
+      (htmlize-concat (nreverse chunks)))))
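+;; Editor's illustration (not part of upstream htmlize), with the
+;; default tab-width of 8:
+;;   (htmlize-untabify-string "a\tb" 0)
+;;   ;; => "a       b"  (the tab expands to seven spaces, padding to column 8)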
+
+(defun htmlize-extract-text (beg end trailing-ellipsis)
+  ;; Extract buffer text, sans the invisible parts.  Then
+  ;; untabify it and escape the HTML metacharacters.
+  (let ((text (htmlize-buffer-substring-no-invisible beg end)))
+    (when trailing-ellipsis
+      (setq text (htmlize-trim-ellipsis text)))
+    ;; If TEXT ends up empty, don't change trailing-ellipsis.
+    (when (> (length text) 0)
+      (setq trailing-ellipsis
+            (get-text-property (1- (length text))
+                               'htmlize-ellipsis text)))
+    (when htmlize-untabify
+      (setq text (htmlize-untabify-string text (current-column))))
+    (setq text (htmlize-string-to-html text))
+    (cl-values text trailing-ellipsis)))
+
+(defun htmlize-despam-address (string)
+  "Replace every occurrence of '@' in STRING with %40.
+This is used to protect mailto links without modifying their meaning."
+  ;; Suggested by Ville Skytta.
+  (while (string-match "@" string)
+    (setq string (replace-match "%40" nil t string)))
+  string)
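+;; Editor's illustration (not part of upstream htmlize; hypothetical address):
+;;   (htmlize-despam-address "john.doe@example.com")
+;;   ;; => "john.doe%40example.com"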
+
+(defun htmlize-make-tmp-overlay (beg end props)
+  (let ((overlay (make-overlay beg end)))
+    (overlay-put overlay 'htmlize-tmp-overlay t)
+    (while props
+      (overlay-put overlay (pop props) (pop props)))
+    overlay))
+
+(defun htmlize-delete-tmp-overlays ()
+  (dolist (overlay (overlays-in (point-min) (point-max)))
+    (when (overlay-get overlay 'htmlize-tmp-overlay)
+      (delete-overlay overlay))))
+
+(defun htmlize-make-link-overlay (beg end uri)
+  (htmlize-make-tmp-overlay beg end `(htmlize-link (:uri ,uri))))
+
+(defun htmlize-create-auto-links ()
+  "Add `htmlize-link' property to all mailto links in the buffer."
+  (save-excursion
+    (goto-char (point-min))
+    (while (re-search-forward
+            "<\\(\\(mailto:\\)?\\([-=+_.a-zA-Z0-9]+@[-_.a-zA-Z0-9]+\\)\\)>"
+            nil t)
+      (let* ((address (match-string 3))
+             (beg (match-beginning 0)) (end (match-end 0))
+             (uri (concat "mailto:" (htmlize-despam-address address))))
+        (htmlize-make-link-overlay beg end uri)))
+    (goto-char (point-min))
+    (while (re-search-forward "<\\(\\(URL:\\)?\\([a-zA-Z]+://[^;]+\\)\\)>"
+                              nil t)
+      (htmlize-make-link-overlay
+       (match-beginning 0) (match-end 0) (match-string 3)))))
+
+;; Tests for htmlize-create-auto-links:
+
+;; 
+;; 
+;; 
+;; 
+;; 
+;; 
+
+(defun htmlize-shadow-form-feeds ()
+  (let ((s "\n
")) + (put-text-property 0 (length s) 'htmlize-literal t s) + (let ((disp `(display ,s))) + (while (re-search-forward "\n\^L" nil t) + (let* ((beg (match-beginning 0)) + (end (match-end 0)) + (form-feed-pos (1+ beg)) + ;; don't process ^L if invisible or covered by `display' + (show (and (htmlize-decode-invisibility-spec + (get-char-property form-feed-pos 'invisible)) + (not (get-char-property form-feed-pos 'display))))) + (when show + (htmlize-make-tmp-overlay beg end disp))))))) + +(defun htmlize-defang-local-variables () + ;; Juri Linkov reports that an HTML-ized "Local variables" can lead + ;; visiting the HTML to fail with "Local variables list is not + ;; properly terminated". He suggested changing the phrase to + ;; syntactically equivalent HTML that Emacs doesn't recognize. + (goto-char (point-min)) + (while (search-forward "Local Variables:" nil t) + (replace-match "Local Variables:" nil t))) + + +;;; Color handling. + +(defvar htmlize-x-library-search-path + `(,data-directory + "/etc/X11/rgb.txt" + "/usr/share/X11/rgb.txt" + ;; the remainder of this list really belongs in a museum + "/usr/X11R6/lib/X11/" + "/usr/X11R5/lib/X11/" + "/usr/lib/X11R6/X11/" + "/usr/lib/X11R5/X11/" + "/usr/local/X11R6/lib/X11/" + "/usr/local/X11R5/lib/X11/" + "/usr/local/lib/X11R6/X11/" + "/usr/local/lib/X11R5/X11/" + "/usr/X11/lib/X11/" + "/usr/lib/X11/" + "/usr/local/lib/X11/" + "/usr/X386/lib/X11/" + "/usr/x386/lib/X11/" + "/usr/XFree86/lib/X11/" + "/usr/unsupported/lib/X11/" + "/usr/athena/lib/X11/" + "/usr/local/x11r5/lib/X11/" + "/usr/lpp/Xamples/lib/X11/" + "/usr/openwin/lib/X11/" + "/usr/openwin/share/lib/X11/")) + +(defun htmlize-get-color-rgb-hash (&optional rgb-file) + "Return a hash table mapping X color names to RGB values. +The keys in the hash table are X11 color names, and the values are the +#rrggbb RGB specifications, extracted from `rgb.txt'. + +If RGB-FILE is nil, the function will try hard to find a suitable file +in the system directories. + +If no rgb.txt file is found, return nil." + (let ((rgb-file (or rgb-file (locate-file + "rgb.txt" + htmlize-x-library-search-path))) + (hash nil)) + (when rgb-file + (with-temp-buffer + (insert-file-contents rgb-file) + (setq hash (make-hash-table :test 'equal)) + (while (not (eobp)) + (cond ((looking-at "^\\s-*\\([!#]\\|$\\)") + ;; Skip comments and empty lines. + ) + ((looking-at + "[ \t]*\\([0-9]+\\)[ \t]+\\([0-9]+\\)[ \t]+\\([0-9]+\\)[ \t]+\\(.*\\)") + (setf (gethash (downcase (match-string 4)) hash) + (format "#%02x%02x%02x" + (string-to-number (match-string 1)) + (string-to-number (match-string 2)) + (string-to-number (match-string 3))))) + (t + (error + "Unrecognized line in %s: %s" + rgb-file + (buffer-substring (point) (progn (end-of-line) (point)))))) + (forward-line 1)))) + hash)) + +;; Compile the RGB map when loaded. On systems where rgb.txt is +;; missing, the value of the variable will be nil, and rgb.txt will +;; not be used. +(defvar htmlize-color-rgb-hash (htmlize-get-color-rgb-hash)) + +;;; Face handling. + +(defun htmlize-face-color-internal (face fg) + ;; Used only under GNU Emacs. Return the color of FACE, but don't + ;; return "unspecified-fg" or "unspecified-bg". If the face is + ;; `default' and the color is unspecified, look up the color in + ;; frame parameters. 
+ (let* ((function (if fg #'face-foreground #'face-background)) + (color (funcall function face nil t))) + (when (and (eq face 'default) (null color)) + (setq color (cdr (assq (if fg 'foreground-color 'background-color) + (frame-parameters))))) + (when (or (eq color 'unspecified) + (equal color "unspecified-fg") + (equal color "unspecified-bg")) + (setq color nil)) + (when (and (eq face 'default) + (null color)) + ;; Assuming black on white doesn't seem right, but I can't think + ;; of anything better to do. + (setq color (if fg "black" "white"))) + color)) + +(defun htmlize-face-foreground (face) + ;; Return the name of the foreground color of FACE. If FACE does + ;; not specify a foreground color, return nil. + (htmlize-face-color-internal face t)) + +(defun htmlize-face-background (face) + ;; Return the name of the background color of FACE. If FACE does + ;; not specify a background color, return nil. + ;; GNU Emacs. + (htmlize-face-color-internal face nil)) + +;; Convert COLOR to the #RRGGBB string. If COLOR is already in that +;; format, it's left unchanged. + +(defun htmlize-color-to-rgb (color) + (let ((rgb-string nil)) + (cond ((null color) + ;; Ignore nil COLOR because it means that the face is not + ;; specifying any color. Hence (htmlize-color-to-rgb nil) + ;; returns nil. + ) + ((string-match "\\`#" color) + ;; The color is already in #rrggbb format. + (setq rgb-string color)) + ((and htmlize-use-rgb-txt + htmlize-color-rgb-hash) + ;; Use of rgb.txt is requested, and it's available on the + ;; system. Use it. + (setq rgb-string (gethash (downcase color) htmlize-color-rgb-hash))) + (t + ;; We're getting the RGB components from Emacs. + (let ((rgb (mapcar (lambda (arg) + (/ arg 256)) + (color-values color)))) + (when rgb + (setq rgb-string (apply #'format "#%02x%02x%02x" rgb)))))) + ;; If RGB-STRING is still nil, it means the color cannot be found, + ;; for whatever reason. In that case just punt and return COLOR. + ;; Most browsers support a decent set of color names anyway. + (or rgb-string color))) + +;; We store the face properties we care about into an +;; `htmlize-fstruct' type. That way we only have to analyze face +;; properties, which can be time consuming, once per each face. The +;; mapping between Emacs faces and htmlize-fstructs is established by +;; htmlize-make-face-map. The name "fstruct" refers to variables of +;; type `htmlize-fstruct', while the term "face" is reserved for Emacs +;; faces. + +(cl-defstruct htmlize-fstruct + foreground ; foreground color, #rrggbb + background ; background color, #rrggbb + size ; size + boldp ; whether face is bold + italicp ; whether face is italic + underlinep ; whether face is underlined + overlinep ; whether face is overlined + strikep ; whether face is struck through + css-name ; CSS name of face + ) + +(defun htmlize-face-set-from-keyword-attr (fstruct attr value) + ;; For ATTR and VALUE, set the equivalent value in FSTRUCT. 
+ (cl-case attr + (:foreground + (setf (htmlize-fstruct-foreground fstruct) (htmlize-color-to-rgb value))) + (:background + (setf (htmlize-fstruct-background fstruct) (htmlize-color-to-rgb value))) + (:height + (setf (htmlize-fstruct-size fstruct) value)) + (:weight + (when (string-match (symbol-name value) "bold") + (setf (htmlize-fstruct-boldp fstruct) t))) + (:slant + (setf (htmlize-fstruct-italicp fstruct) (or (eq value 'italic) + (eq value 'oblique)))) + (:bold + (setf (htmlize-fstruct-boldp fstruct) value)) + (:italic + (setf (htmlize-fstruct-italicp fstruct) value)) + (:underline + (setf (htmlize-fstruct-underlinep fstruct) value)) + (:overline + (setf (htmlize-fstruct-overlinep fstruct) value)) + (:strike-through + (setf (htmlize-fstruct-strikep fstruct) value)))) + +(defun htmlize-face-size (face) + ;; The size (height) of FACE, taking inheritance into account. + ;; Only works in Emacs 21 and later. + (let* ((face-list (list face)) + (head face-list) + (tail face-list)) + (while head + (let ((inherit (face-attribute (car head) :inherit))) + (cond ((listp inherit) + (setcdr tail (cl-copy-list inherit)) + (setq tail (last tail))) + ((eq inherit 'unspecified)) + (t + (setcdr tail (list inherit)) + (setq tail (cdr tail))))) + (pop head)) + (let ((size-list + (cl-loop + for f in face-list + for h = (and (facep f) (face-attribute f :height)) + collect (if (eq h 'unspecified) nil h)))) + (cl-reduce 'htmlize-merge-size (cons nil size-list))))) + +(defun htmlize-face-css-name (face) + ;; Generate the css-name property for the given face. Emacs places + ;; no restrictions on the names of symbols that represent faces -- + ;; any characters may be in the name, even control chars. We try + ;; hard to beat the face name into shape, both esthetically and + ;; according to CSS1 specs. + (let ((name (downcase (symbol-name face)))) + (when (string-match "\\`font-lock-" name) + ;; font-lock-FOO-face -> FOO. + (setq name (replace-match "" t t name))) + (when (string-match "-face\\'" name) + ;; Drop the redundant "-face" suffix. + (setq name (replace-match "" t t name))) + (while (string-match "[^-a-zA-Z0-9]" name) + ;; Drop the non-alphanumerics. + (setq name (replace-match "X" t t name))) + (when (string-match "\\`[-0-9]" name) + ;; CSS identifiers may not start with a digit. + (setq name (concat "X" name))) + ;; After these transformations, the face could come out empty. + (when (equal name "") + (setq name "face")) + ;; Apply the prefix. + (concat htmlize-css-name-prefix name))) + +(defun htmlize-face-to-fstruct-1 (face) + "Convert Emacs face FACE to fstruct, internal." 
+ (let ((fstruct (make-htmlize-fstruct + :foreground (htmlize-color-to-rgb + (htmlize-face-foreground face)) + :background (htmlize-color-to-rgb + (htmlize-face-background face))))) + ;; GNU Emacs + (dolist (attr '(:weight :slant :underline :overline :strike-through)) + (let ((value (face-attribute face attr nil t))) + (when (and value (not (eq value 'unspecified))) + (htmlize-face-set-from-keyword-attr fstruct attr value)))) + (let ((size (htmlize-face-size face))) + (unless (eql size 1.0) ; ignore non-spec + (setf (htmlize-fstruct-size fstruct) size))) + (setf (htmlize-fstruct-css-name fstruct) (htmlize-face-css-name face)) + fstruct)) + +(defun htmlize-face-to-fstruct (face) + (let* ((face-list (or (and (symbolp face) + (cdr (assq face face-remapping-alist))) + (list face))) + (fstruct (htmlize-merge-faces + (mapcar (lambda (face) + (if (symbolp face) + (or (htmlize-get-override-fstruct face) + (htmlize-face-to-fstruct-1 face)) + (htmlize-attrlist-to-fstruct face))) + (nreverse face-list))))) + (when (symbolp face) + (setf (htmlize-fstruct-css-name fstruct) (htmlize-face-css-name face))) + fstruct)) + +(defmacro htmlize-copy-attr-if-set (attr-list dest source) + ;; Generate code with the following pattern: + ;; (progn + ;; (when (htmlize-fstruct-ATTR source) + ;; (setf (htmlize-fstruct-ATTR dest) (htmlize-fstruct-ATTR source))) + ;; ...) + ;; for the given list of boolean attributes. + (cons 'progn + (cl-loop for attr in attr-list + for attr-sym = (intern (format "htmlize-fstruct-%s" attr)) + collect `(when (,attr-sym ,source) + (setf (,attr-sym ,dest) (,attr-sym ,source)))))) + +(defun htmlize-merge-size (merged next) + ;; Calculate the size of the merge of MERGED and NEXT. + (cond ((null merged) next) + ((integerp next) next) + ((null next) merged) + ((floatp merged) (* merged next)) + ((integerp merged) (round (* merged next))))) + +(defun htmlize-merge-two-faces (merged next) + (htmlize-copy-attr-if-set + (foreground background boldp italicp underlinep overlinep strikep) + merged next) + (setf (htmlize-fstruct-size merged) + (htmlize-merge-size (htmlize-fstruct-size merged) + (htmlize-fstruct-size next))) + merged) + +(defun htmlize-merge-faces (fstruct-list) + (cond ((null fstruct-list) + ;; Nothing to do, return a dummy face. + (make-htmlize-fstruct)) + ((null (cdr fstruct-list)) + ;; Optimize for the common case of a single face, simply + ;; return it. + (car fstruct-list)) + (t + (cl-reduce #'htmlize-merge-two-faces + (cons (make-htmlize-fstruct) fstruct-list))))) + +;; GNU Emacs 20+ supports attribute lists in `face' properties. For +;; example, you can use `(:foreground "red" :weight bold)' as an +;; overlay's "face", or you can even use a list of such lists, etc. +;; We call those "attrlists". +;; +;; htmlize supports attrlist by converting them to fstructs, the same +;; as with regular faces. + +(defun htmlize-attrlist-to-fstruct (attrlist &optional name) + ;; Like htmlize-face-to-fstruct, but accepts an ATTRLIST as input. + (let ((fstruct (make-htmlize-fstruct))) + (cond ((eq (car attrlist) 'foreground-color) + ;; ATTRLIST is (foreground-color . COLOR) + (setf (htmlize-fstruct-foreground fstruct) + (htmlize-color-to-rgb (cdr attrlist)))) + ((eq (car attrlist) 'background-color) + ;; ATTRLIST is (background-color . COLOR) + (setf (htmlize-fstruct-background fstruct) + (htmlize-color-to-rgb (cdr attrlist)))) + (t + ;; ATTRLIST is a plist. 
+ (while attrlist + (let ((attr (pop attrlist)) + (value (pop attrlist))) + (when (and value (not (eq value 'unspecified))) + (htmlize-face-set-from-keyword-attr fstruct attr value)))))) + (setf (htmlize-fstruct-css-name fstruct) (or name "custom")) + fstruct)) + +(defun htmlize-decode-face-prop (prop) + "Turn face property PROP into a list of face-like objects." + ;; PROP can be a symbol naming a face, a string naming such a + ;; symbol, a cons (foreground-color . COLOR) or (background-color + ;; COLOR), a property list (:attr1 val1 :attr2 val2 ...), or a list + ;; of any of those. + ;; + ;; (htmlize-decode-face-prop 'face) -> (face) + ;; (htmlize-decode-face-prop '(face1 face2)) -> (face1 face2) + ;; (htmlize-decode-face-prop '(:attr "val")) -> ((:attr "val")) + ;; (htmlize-decode-face-prop '((:attr "val") face (foreground-color "red"))) + ;; -> ((:attr "val") face (foreground-color "red")) + ;; + ;; Unrecognized atoms or non-face symbols/strings are silently + ;; stripped away. + (cond ((null prop) + nil) + ((symbolp prop) + (and (facep prop) + (list prop))) + ((stringp prop) + (and (facep (intern-soft prop)) + (list prop))) + ((atom prop) + nil) + ((and (symbolp (car prop)) + (eq ?: (aref (symbol-name (car prop)) 0))) + (list prop)) + ((or (eq (car prop) 'foreground-color) + (eq (car prop) 'background-color)) + (list prop)) + (t + (apply #'nconc (mapcar #'htmlize-decode-face-prop prop))))) + +(defun htmlize-get-override-fstruct (face) + (let* ((raw-def (plist-get htmlize-face-overrides face)) + (def (cond ((stringp raw-def) (list :foreground raw-def)) + ((listp raw-def) raw-def) + (t + (error (format (concat "face override must be an " + "attribute list or string, got %s") + raw-def)))))) + (and def + (htmlize-attrlist-to-fstruct def (symbol-name face))))) + +(defun htmlize-make-face-map (faces) + ;; Return a hash table mapping Emacs faces to htmlize's fstructs. + ;; The keys are either face symbols or attrlists, so the test + ;; function must be `equal'. + (let ((face-map (make-hash-table :test 'equal)) + css-names) + (dolist (face faces) + (unless (gethash face face-map) + ;; Haven't seen FACE yet; convert it to an fstruct and cache + ;; it. + (let ((fstruct (htmlize-face-to-fstruct face))) + (setf (gethash face face-map) fstruct) + (let* ((css-name (htmlize-fstruct-css-name fstruct)) + (new-name css-name) + (i 0)) + ;; Uniquify the face's css-name by using NAME-1, NAME-2, + ;; etc. + (while (member new-name css-names) + (setq new-name (format "%s-%s" css-name (cl-incf i)))) + (unless (equal new-name css-name) + (setf (htmlize-fstruct-css-name fstruct) new-name)) + (push new-name css-names))))) + face-map)) + +(defun htmlize-unstringify-face (face) + "If FACE is a string, return it interned, otherwise return it unchanged." + (if (stringp face) + (intern face) + face)) + +(defun htmlize-faces-in-buffer () + "Return a list of faces used in the current buffer. +This is the set of faces specified by the `face' text property and by buffer +overlays that specify `face'." + (let (faces) + ;; Faces used by text properties. + (let ((pos (point-min)) face-prop next) + (while (< pos (point-max)) + (setq face-prop (get-text-property pos 'face) + next (or (next-single-property-change pos 'face) (point-max))) + (setq faces (cl-nunion (htmlize-decode-face-prop face-prop) + faces :test 'equal)) + (setq pos next))) + ;; Faces used by overlays. 
+ (dolist (overlay (overlays-in (point-min) (point-max))) + (let ((face-prop (overlay-get overlay 'face))) + (setq faces (cl-nunion (htmlize-decode-face-prop face-prop) + faces :test 'equal)))) + faces)) + +;; htmlize-faces-at-point returns the faces in use at point. The +;; faces are sorted by increasing priority, i.e. the last face takes +;; precedence. +;; +;; This returns all the faces in the `face' property and all the faces +;; in the overlays at point. + +(defun htmlize-faces-at-point () + (let (all-faces) + ;; Faces from text properties. + (let ((face-prop (get-text-property (point) 'face))) + ;; we need to reverse the `face' prop because we want + ;; more specific faces to come later + (setq all-faces (nreverse (htmlize-decode-face-prop face-prop)))) + ;; Faces from overlays. + (let ((overlays + ;; Collect overlays at point that specify `face'. + (cl-delete-if-not (lambda (o) + (overlay-get o 'face)) + (nreverse (overlays-at (point) t)))) + list face-prop) + (dolist (overlay overlays) + (setq face-prop (overlay-get overlay 'face) + list (nconc (htmlize-decode-face-prop face-prop) list))) + ;; Under "Merging Faces" the manual explicitly states + ;; that faces specified by overlays take precedence over + ;; faces specified by text properties. + (setq all-faces (nconc all-faces list))) + all-faces)) + +;; htmlize supports generating HTML in several flavors, some of which +;; use CSS, and others the element. We take an OO approach and +;; define "methods" that indirect to the functions that depend on +;; `htmlize-output-type'. The currently used methods are `doctype', +;; `insert-head', `body-tag', `pre-tag', and `text-markup'. Not all +;; output types define all methods. +;; +;; Methods are called either with (htmlize-method METHOD ARGS...) +;; special form, or by accessing the function with +;; (htmlize-method-function 'METHOD) and calling (funcall FUNCTION). +;; The latter form is useful in tight loops because `htmlize-method' +;; conses. + +(defmacro htmlize-method (method &rest args) + ;; Expand to (htmlize-TYPE-METHOD ...ARGS...). TYPE is the value of + ;; `htmlize-output-type' at run time. + `(funcall (htmlize-method-function ',method) ,@args)) + +(defun htmlize-method-function (method) + ;; Return METHOD's function definition for the current output type. + ;; The returned object can be safely funcalled. + (let ((sym (intern (format "htmlize-%s-%s" htmlize-output-type method)))) + (indirect-function (if (fboundp sym) + sym + (let ((default (intern (concat "htmlize-default-" + (symbol-name method))))) + (if (fboundp default) + default + 'ignore)))))) + +(defvar htmlize-memoization-table (make-hash-table :test 'equal)) + +(defmacro htmlize-memoize (key generator) + "Return the value of GENERATOR, memoized as KEY. +That means that GENERATOR will be evaluated and returned the first time +it's called with the same value of KEY. All other times, the cached +\(memoized) value will be returned." + (let ((value (cl-gensym))) + `(let ((,value (gethash ,key htmlize-memoization-table))) + (unless ,value + (setq ,value ,generator) + (setf (gethash ,key htmlize-memoization-table) ,value)) + ,value))) + +;;; Default methods. + +(defun htmlize-default-doctype () + nil ; no doc-string + ;; Note that the `font' output is technically invalid under this DTD + ;; because the DTD doesn't allow embedding in
.
+  ""
+  )
+
+(defun htmlize-default-body-tag (face-map)
+  nil					; no doc-string
+  face-map ; shut up the byte-compiler
+  "")
+
+(defun htmlize-default-pre-tag (face-map)
+  nil					; no doc-string
+  face-map ; shut up the byte-compiler
+  "
")
+
+
+;;; CSS based output support.
+
+;; Internal function; not a method.
+(defun htmlize-css-specs (fstruct)
+  (let (result)
+    (when (htmlize-fstruct-foreground fstruct)
+      (push (format "color: %s;" (htmlize-fstruct-foreground fstruct))
+	    result))
+    (when (htmlize-fstruct-background fstruct)
+      (push (format "background-color: %s;"
+		    (htmlize-fstruct-background fstruct))
+	    result))
+    (let ((size (htmlize-fstruct-size fstruct)))
+      (when (and size (not (eq htmlize-ignore-face-size t)))
+	(cond ((floatp size)
+	       (push (format "font-size: %d%%;" (* 100 size)) result))
+	      ((not (eq htmlize-ignore-face-size 'absolute))
+	       (push (format "font-size: %spt;" (/ size 10.0)) result)))))
+    (when (htmlize-fstruct-boldp fstruct)
+      (push "font-weight: bold;" result))
+    (when (htmlize-fstruct-italicp fstruct)
+      (push "font-style: italic;" result))
+    (when (htmlize-fstruct-underlinep fstruct)
+      (push "text-decoration: underline;" result))
+    (when (htmlize-fstruct-overlinep fstruct)
+      (push "text-decoration: overline;" result))
+    (when (htmlize-fstruct-strikep fstruct)
+      (push "text-decoration: line-through;" result))
+    (nreverse result)))
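+;; Editor's illustration (not part of upstream htmlize): an fstruct for
+;; a bold red face would yield something like
+;;   ("color: #ff0000;" "font-weight: bold;")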
+
+(defun htmlize-css-insert-head (buffer-faces face-map)
+  (insert "    \n"))
+
+(defun htmlize-css-text-markup (fstruct-list buffer)
+  ;; Open the markup needed to insert text colored with FACES into
+  ;; BUFFER.  Return the function that closes the markup.
+
+  ;; In CSS mode, this is easy: just nest the text in one <span class=...> tag for each face in FSTRUCT-LIST.
+  (dolist (fstruct fstruct-list)
+    (princ "" buffer))
+  (htmlize-lexlet ((fstruct-list fstruct-list) (buffer buffer))
+    (lambda ()
+      (dolist (fstruct fstruct-list)
+        (ignore fstruct)                ; shut up the byte-compiler
+        (princ "</span>" buffer)))))
+
+;; `inline-css' output support.
+
+(defun htmlize-inline-css-body-tag (face-map)
+  (format ""
+	  (mapconcat #'identity (htmlize-css-specs (gethash 'default face-map))
+		     " ")))
+
+(defun htmlize-inline-css-pre-tag (face-map)
+  (if htmlize-pre-style
+      (format "
"
+              (mapconcat #'identity (htmlize-css-specs (gethash 'default face-map))
+                         " "))
+    (format "
")))
+
+(defun htmlize-inline-css-text-markup (fstruct-list buffer)
+  (let* ((merged (htmlize-merge-faces fstruct-list))
+	 (style (htmlize-memoize
+		 merged
+		 (let ((specs (htmlize-css-specs merged)))
+		   (and specs
+			(mapconcat #'identity (htmlize-css-specs merged) " "))))))
+    (when style
+      (princ "" buffer))
+    (htmlize-lexlet ((style style) (buffer buffer))
+      (lambda ()
+        (when style
+          (princ "</span>" buffer))))))
+
+;;; `font' tag based output support.
+
+(defun htmlize-font-body-tag (face-map)
+  (let ((fstruct (gethash 'default face-map)))
+    (format ""
+	    (htmlize-fstruct-foreground fstruct)
+	    (htmlize-fstruct-background fstruct))))
+
+(defun htmlize-font-pre-tag (face-map)
+  (if htmlize-pre-style
+      (let ((fstruct (gethash 'default face-map)))
+        (format "
"
+                (htmlize-fstruct-foreground fstruct)
+                (htmlize-fstruct-background fstruct)))
+    (format "
")))
+       
+(defun htmlize-font-text-markup (fstruct-list buffer)
+  ;; In `font' mode, we use the traditional HTML means of altering
+  ;; presentation:  tag for colors,  for bold,  for
+  ;; underline, and  for strike-through.
+  (let* ((merged (htmlize-merge-faces fstruct-list))
+	 (markup (htmlize-memoize
+		  merged
+		  (cons (concat
+			 (and (htmlize-fstruct-foreground merged)
+			      (format "" (htmlize-fstruct-foreground merged)))
+			 (and (htmlize-fstruct-boldp merged)      "")
+			 (and (htmlize-fstruct-italicp merged)    "")
+			 (and (htmlize-fstruct-underlinep merged) "")
+			 (and (htmlize-fstruct-strikep merged)    ""))
+			(concat
+			 (and (htmlize-fstruct-strikep merged)    "")
+			 (and (htmlize-fstruct-underlinep merged) "")
+			 (and (htmlize-fstruct-italicp merged)    "")
+			 (and (htmlize-fstruct-boldp merged)      "")
+			 (and (htmlize-fstruct-foreground merged) ""))))))
+    (princ (car markup) buffer)
+    (htmlize-lexlet ((markup markup) (buffer buffer))
+      (lambda ()
+        (princ (cdr markup) buffer)))))
+
+(defun htmlize-buffer-1 ()
+  ;; Internal function; don't call it from outside this file.  Htmlize
+  ;; current buffer, writing the resulting HTML to a new buffer, and
+  ;; return it.  Unlike htmlize-buffer, this doesn't change current
+  ;; buffer or use switch-to-buffer.
+  (save-excursion
+    ;; Protect against the hook changing the current buffer.
+    (save-excursion
+      (run-hooks 'htmlize-before-hook))
+    ;; Convince font-lock support modes to fontify the entire buffer
+    ;; in advance.
+    (htmlize-ensure-fontified)
+    (clrhash htmlize-extended-character-cache)
+    (clrhash htmlize-memoization-table)
+    ;; It's important that the new buffer inherits default-directory
+    ;; from the current buffer.
+    (let ((htmlbuf (generate-new-buffer (if (buffer-file-name)
+                                            (htmlize-make-file-name
+                                             (file-name-nondirectory
+                                              (buffer-file-name)))
+                                          "*html*")))
+          (completed nil))
+      (unwind-protect
+          (let* ((buffer-faces (htmlize-faces-in-buffer))
+                 (face-map (htmlize-make-face-map (cl-adjoin 'default buffer-faces)))
+                 (places (cl-gensym))
+                 (title (if (buffer-file-name)
+                            (file-name-nondirectory (buffer-file-name))
+                          (buffer-name))))
+            (when htmlize-generate-hyperlinks
+              (htmlize-create-auto-links))
+            (when htmlize-replace-form-feeds
+              (htmlize-shadow-form-feeds))
+
+            ;; Initialize HTMLBUF and insert the HTML prolog.
+            (with-current-buffer htmlbuf
+              (buffer-disable-undo)
+              (insert (htmlize-method doctype) ?\n
+                      (format "\n"
+                              htmlize-version htmlize-output-type)
+                      "\n  ")
+              (put places 'head-start (point-marker))
+              (insert "\n"
+                      "    " (htmlize-protect-string title) "\n"
+                      (if htmlize-html-charset
+                          (format (concat "    \n")
+                                  htmlize-html-charset)
+                        "")
+                      htmlize-head-tags)
+              (htmlize-method insert-head buffer-faces face-map)
+              (insert "  ")
+              (put places 'head-end (point-marker))
+              (insert "\n  ")
+              (put places 'body-start (point-marker))
+              (insert (htmlize-method body-tag face-map)
+                      "\n    ")
+              (put places 'content-start (point-marker))
+              (insert (htmlize-method pre-tag face-map) "\n"))
+            (let ((text-markup
+                   ;; Get the inserter method, so we can funcall it inside
+                   ;; the loop.  Not calling `htmlize-method' in the loop
+                   ;; body yields a measurable speed increase.
+                   (htmlize-method-function 'text-markup))
+                  ;; Declare variables used in loop body outside the loop
+                  ;; because it's faster to establish `let' bindings only
+                  ;; once.
+                  next-change text face-list trailing-ellipsis
+                  fstruct-list last-fstruct-list
+                  (close-markup (lambda ())))
+              ;; This loop traverses and reads the source buffer, appending
+              ;; the resulting HTML to HTMLBUF.  This method is fast
+              ;; because: 1) it doesn't require examining the text
+              ;; properties char by char (htmlize-next-face-change is used
+              ;; to move between runs with the same face), and 2) it doesn't
+              ;; require frequent buffer switches, which are slow because
+              ;; they rebind all buffer-local vars.
+              (goto-char (point-min))
+              (while (not (eobp))
+                (setq next-change (htmlize-next-face-change (point)))
+                ;; Get faces in use between (point) and NEXT-CHANGE, and
+                ;; convert them to fstructs.
+                (setq face-list (htmlize-faces-at-point)
+                      fstruct-list (delq nil (mapcar (lambda (f)
+                                                       (gethash f face-map))
+                                                     face-list)))
+                (cl-multiple-value-setq (text trailing-ellipsis)
+                  (htmlize-extract-text (point) next-change trailing-ellipsis))
+                ;; Don't bother writing anything if there's no text (this
+                ;; happens in invisible regions).
+                (when (> (length text) 0)
+                  ;; Open the new markup if necessary and insert the text.
+                  (when (not (cl-equalp fstruct-list last-fstruct-list))
+                    (funcall close-markup)
+                    (setq last-fstruct-list fstruct-list
+                          close-markup (funcall text-markup fstruct-list htmlbuf)))
+                  (princ text htmlbuf))
+                (goto-char next-change))
+
+              ;; We've gone through the buffer; close the markup from
+              ;; the last run, if any.
+              (funcall close-markup))
+
+            ;; Insert the epilog and post-process the buffer.
+            (with-current-buffer htmlbuf
+              (insert "
") + (put places 'content-end (point-marker)) + (insert "\n ") + (put places 'body-end (point-marker)) + (insert "\n\n") + (htmlize-defang-local-variables) + (goto-char (point-min)) + (when htmlize-html-major-mode + ;; What sucks about this is that the minor modes, most notably + ;; font-lock-mode, won't be initialized. Oh well. + (funcall htmlize-html-major-mode)) + (set (make-local-variable 'htmlize-buffer-places) + (symbol-plist places)) + (run-hooks 'htmlize-after-hook) + (buffer-enable-undo)) + (setq completed t) + htmlbuf) + + (when (not completed) + (kill-buffer htmlbuf)) + (htmlize-delete-tmp-overlays))))) + +;; Utility functions. + +(defmacro htmlize-with-fontify-message (&rest body) + ;; When forcing fontification of large buffers in + ;; htmlize-ensure-fontified, inform the user that he is waiting for + ;; font-lock, not for htmlize to finish. + `(progn + (if (> (buffer-size) 65536) + (message "Forcing fontification of %s..." + (buffer-name (current-buffer)))) + ,@body + (if (> (buffer-size) 65536) + (message "Forcing fontification of %s...done" + (buffer-name (current-buffer)))))) + +(defun htmlize-ensure-fontified () + ;; If font-lock is being used, ensure that the "support" modes + ;; actually fontify the buffer. If font-lock is not in use, we + ;; don't care because, except in htmlize-file, we don't force + ;; font-lock on the user. + (when font-lock-mode + ;; In part taken from ps-print-ensure-fontified in GNU Emacs 21. + (when (and (boundp 'jit-lock-mode) + (symbol-value 'jit-lock-mode)) + (htmlize-with-fontify-message + (jit-lock-fontify-now (point-min) (point-max)))) + + (if (fboundp 'font-lock-ensure) + (font-lock-ensure) + ;; Emacs prior to 25.1 + (with-no-warnings + (font-lock-mode 1) + (font-lock-fontify-buffer))))) + + +;;;###autoload +(defun htmlize-buffer (&optional buffer) + "Convert BUFFER to HTML, preserving colors and decorations. + +The generated HTML is available in a new buffer, which is returned. +When invoked interactively, the new buffer is selected in the current +window. The title of the generated document will be set to the buffer's +file name or, if that's not available, to the buffer's name. + +Note that htmlize doesn't fontify your buffers, it only uses the +decorations that are already present. If you don't set up font-lock or +something else to fontify your buffers, the resulting HTML will be +plain. Likewise, if you don't like the choice of colors, fix the mode +that created them, or simply alter the faces it uses." + (interactive) + (let ((htmlbuf (with-current-buffer (or buffer (current-buffer)) + (htmlize-buffer-1)))) + (when (interactive-p) + (switch-to-buffer htmlbuf)) + htmlbuf)) + +;;;###autoload +(defun htmlize-region (beg end) + "Convert the region to HTML, preserving colors and decorations. +See `htmlize-buffer' for details." + (interactive "r") + ;; Don't let zmacs region highlighting end up in HTML. + (when (fboundp 'zmacs-deactivate-region) + (zmacs-deactivate-region)) + (let ((htmlbuf (save-restriction + (narrow-to-region beg end) + (htmlize-buffer-1)))) + (when (interactive-p) + (switch-to-buffer htmlbuf)) + htmlbuf)) + +(defun htmlize-region-for-paste (beg end) + "Htmlize the region and return just the HTML as a string. +This forces the `inline-css' style and only returns the HTML body, +but without the BODY tag. This should make it useful for inserting +the text to another HTML buffer." 
+ (let* ((htmlize-output-type 'inline-css) + (htmlbuf (htmlize-region beg end))) + (unwind-protect + (with-current-buffer htmlbuf + (buffer-substring (plist-get htmlize-buffer-places 'content-start) + (plist-get htmlize-buffer-places 'content-end))) + (kill-buffer htmlbuf)))) + +(defun htmlize-region-save-screenshot (beg end) + "Save the htmlized (see `htmlize-region-for-paste') region in +the kill ring. Uses `inline-css', with style information in +`
' tags, so that the rendering of the marked up text
+approximates the buffer as closely as possible."
+  (interactive "r")
+  (let ((htmlize-pre-style t))
+    (kill-new (htmlize-region-for-paste beg end)))
+  (deactivate-mark))
+
+(defun htmlize-make-file-name (file)
+  "Make an HTML file name from FILE.
+
+In its default implementation, this simply appends `.html' to FILE.
+This function is called by htmlize to create the buffer file name, and
+by `htmlize-file' to create the target file name.
+
+More elaborate transformations are conceivable, such as changing FILE's
+extension to `.html' (\"file.c\" -> \"file.html\").  If you want them,
+overload this function to do it and htmlize will comply."
+  (concat file ".html"))
+
+;; Older implementation of htmlize-make-file-name that changes FILE's
+;; extension to ".html".
+;(defun htmlize-make-file-name (file)
+;  (let ((extension (file-name-extension file))
+;	(sans-extension (file-name-sans-extension file)))
+;    (if (or (equal extension "html")
+;	    (equal extension "htm")
+;	    (equal sans-extension ""))
+;	(concat file ".html")
+;      (concat sans-extension ".html"))))
+
+;;;###autoload
+(defun htmlize-file (file &optional target)
+  "Load FILE, fontify it, convert it to HTML, and save the result.
+
+Contents of FILE are inserted into a temporary buffer, whose major mode
+is set with `normal-mode' as appropriate for the file type.  The buffer
+is subsequently fontified with `font-lock' and converted to HTML.  Note
+that, unlike `htmlize-buffer', this function explicitly turns on
+font-lock.  If a form of highlighting other than font-lock is desired,
+please use `htmlize-buffer' directly on buffers so highlighted.
+
+Buffers currently visiting FILE are unaffected by this function.  The
+function does not change current buffer or move the point.
+
+If TARGET is specified and names a directory, the resulting file will be
+saved there instead of to FILE's directory.  If TARGET is specified and
+does not name a directory, it will be used as output file name."
+  (interactive (list (read-file-name
+		      "HTML-ize file: "
+		      nil nil nil (and (buffer-file-name)
+				       (file-name-nondirectory
+					(buffer-file-name))))))
+  (let ((output-file (if (and target (not (file-directory-p target)))
+			 target
+		       (expand-file-name
+			(htmlize-make-file-name (file-name-nondirectory file))
+			(or target (file-name-directory file)))))
+	;; Try to prevent `find-file-noselect' from triggering
+	;; font-lock because we'll fontify explicitly below.
+	(font-lock-mode nil)
+	(font-lock-auto-fontify nil)
+	(global-font-lock-mode nil)
+	;; Ignore the size limit for the purposes of htmlization.
+	(font-lock-maximum-size nil))
+    (with-temp-buffer
+      ;; Insert FILE into the temporary buffer.
+      (insert-file-contents file)
+      ;; Set the file name so normal-mode and htmlize-buffer-1 pick it
+      ;; up.  Restore it afterwards so with-temp-buffer's kill-buffer
+      ;; doesn't complain about killing a modified buffer.
+      (let ((buffer-file-name file))
+	;; Set the major mode for the sake of font-lock.
+	(normal-mode)
+	;; htmlize the buffer and save the HTML.
+	(with-current-buffer (htmlize-buffer-1)
+	  (unwind-protect
+	      (progn
+		(run-hooks 'htmlize-file-hook)
+		(write-region (point-min) (point-max) output-file))
+	    (kill-buffer (current-buffer)))))))
+  ;; I haven't decided on a useful return value yet, so just return
+  ;; nil.
+  nil)
+
+;;;###autoload
+(defun htmlize-many-files (files &optional target-directory)
+  "Convert FILES to HTML and save the corresponding HTML versions.
+
+FILES should be a list of file names to convert.  This function calls
+`htmlize-file' on each file; see that function for details.  When
+invoked interactively, you are prompted for a list of files to convert,
+terminated with RET.
+
+If TARGET-DIRECTORY is specified, the HTML files will be saved to that
+directory.  Normally, each HTML file is saved to the directory of the
+corresponding source file."
+  (interactive
+   (list
+    (let (list file)
+      ;; Use empty string as DEFAULT because setting DEFAULT to nil
+      ;; defaults to the directory name, which is not what we want.
+      (while (not (equal (setq file (read-file-name
+				     "HTML-ize file (RET to finish): "
+				     (and list (file-name-directory
+						(car list)))
+				     "" t))
+			 ""))
+	(push file list))
+      (nreverse list))))
+  ;; Verify that TARGET-DIRECTORY is indeed a directory.  If it's a
+  ;; file, htmlize-file will use it as target, and that doesn't make
+  ;; sense.
+  (and target-directory
+       (not (file-directory-p target-directory))
+       (error "target-directory must name a directory: %s" target-directory))
+  (dolist (file files)
+    (htmlize-file file target-directory)))
+
+;;;###autoload
+(defun htmlize-many-files-dired (arg &optional target-directory)
+  "HTMLize dired-marked files."
+  (interactive "P")
+  (htmlize-many-files (dired-get-marked-files nil arg) target-directory))
+
+(provide 'htmlize)
+
+;; Local Variables:
+;; byte-compile-warnings: (not unresolved obsolete)
+;; End:
+
+;;; htmlize.el ends here
diff --git a/lectures/1.org b/lectures/1.org
new file mode 100644
index 0000000..c4021b4
--- /dev/null
+++ b/lectures/1.org
@@ -0,0 +1,41 @@
+* Data structure and Algorithm
++ A *data structure* is a particular way of storing and organizing data. The purpose is to access and modify data efficiently.
++ A procedure to solve a specific problem is called an *algorithm*.
+
+During programming we use data structures and algorithms that work on that data.
+
+* Characteristics of Algorithms
+An algorithm has the following characteristics.
++ *Input* : Zero or more quantities are externally supplied to the algorithm.
++ *Output* : An algorithm should produce at least one output.
++ *Finiteness* : The algorithm should terminate after a finite number of steps. It should not run infinitely.
++ *Definiteness* : An algorithm should be clear and unambiguous. Every instruction of an algorithm must have a single meaning.
++ *Effectiveness* : An algorithm must be made of very basic and simple operations that a computer can perform.
++ *Language Independence* : An algorithm is language independent and can be implemented in any programming language.
+
+* Behaviour of algorithm
+The behaviour of an algorithm is the analysis of the algorithm on the basis of *Time* and *Space*.
++ *Time complexity* : Amount of time required to run the algorithm.
++ *Space complexity* : Amount of space (memory) required to execute the algorithm.
+
+The behaviour of algorithm can be used to compare two algorithms which solve the same problem.
+\\
+Preference is traditionally given to better time complexity, but we may need to prefer better space complexity depending on the requirements.
+
+** Best, Worst and Average Cases
+The input size tells us the size of the input given to the algorithm. Based on the size of the input, the time/storage usage of the algorithm changes. *Example*, an array with larger input size (more elements) will take more time to sort.
++ Best Case : The lowest time/storage usage for the given input size.
++ Worst Case : The highest time/storage usage for the given input size.
++ Average Case : The average time/storage usage for the given input size.
+
+** Bounds of algorithm
+Since algorithms are finite, the time and space they take are *bounded*, i.e., they have a minimum and a maximum. These bounds are the upper bound and the lower bound.
++ Upper Bound : The maximum amount of space/time taken by the algorithm is the upper bound. It is shown as a function of worst cases of time/storage usage over all the possible input sizes.
++ Lower Bound : The minimum amount of space/time taken by the algorithm is the lower bound. It is shown as a function of best cases of time/storage usage over all the possible input sizes.
+
+* Asymptotic Notations
+
+** Big-Oh Notation [O]
++ The Big Oh notation is used to define the upper bound of an algorithm.
++ Given a non-negative function f(n) and another non-negative function g(n), we say that $f(n) = O(g(n))$ if there exist a positive number $n_0$ and a positive constant $c$ such that \[ f(n) \le c.g(n) \ \ \forall n \ge n_0  \]
++ So if growth rate of g(n) is greater than or equal to growth rate of f(n), then $f(n) = O(g(n))$.
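+
+For instance (a small example added for illustration), take $f(n) = 3n + 2$ and $g(n) = n$. Since
+\[ 3n + 2 \le 4.n \ \ \forall n \ge 2 \]
+the definition is satisfied with $c = 4$ and $n_0 = 2$, so $3n + 2 = O(n)$.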
diff --git a/lectures/2.org b/lectures/2.org
new file mode 100644
index 0000000..58f0b8e
--- /dev/null
+++ b/lectures/2.org
@@ -0,0 +1,68 @@
+* Asymptotic Notations
+
+** Omega Notation [ $\Omega$ ]
++ It is used to show the lower bound of the algorithm.
++ We say that $f(n) = \Omega (g(n))$ if there exist a positive integer $n_0$ and a positive constant $c$ such that \[ f(n) \ge c.g(n) \ \ \forall n \ge n_0 \]
++ So the growth rate of $g(n)$ should be less than or equal to the growth rate of $f(n)$
+
+*Note* : If $f(n) = O(g(n))$ then $g(n) = \Omega (f(n))$
+
+** Theta Notation [ $\theta$ ]
++ It is used to provide the asymptotic *equal (tight) bound*.
++ $f(n) = \theta (g(n))$ if there exist a positive integer $n_0$ and positive constants $c_1$ and $c_2$ such that \[ c_1 . g(n) \le f(n) \le c_2 . g(n) \ \ \forall n \ge n_0 \]
++ So the growth rate of $f(n)$ and $g(n)$ should be equal.
+
+*Note* : So if $f(n) = O(g(n))$ and $f(n) = \Omega (g(n))$, then $f(n) = \theta (g(n))$
+
+** Little-Oh Notation [o]
++ The little o notation defines the strict upper bound of an algorithm.
++ We say that $f(n) = o(g(n))$ if for every positive constant $c$ there exists a positive integer $n_0$ such that, \[ f(n) < c.g(n) \ \ \forall n \ge n_0 \]
++ Notice how the condition is <, rather than the $\le$ used in Big-Oh, and it must hold for every positive constant $c$. So the growth rate of $g(n)$ is strictly greater than that of $f(n)$.
+
+** Little-Omega Notation [ $\omega$ ]
++ The little omega notation defines the strict lower bound of an algorithm.
++ We say that $f(n) = \omega (g(n))$ if for every positive constant $c$ there exists a positive integer $n_0$ such that, \[ f(n) > c.g(n) \ \ \forall n \ge n_0 \]
++ Notice how the condition is >, rather than the $\ge$ used in Big-Omega, and it must hold for every positive constant $c$. So the growth rate of $g(n)$ is strictly less than that of $f(n)$.
+
+* Comparing growth rate of functions
+
+** Applying limit
+To compare two functions $f(n)$ and $g(n)$, we can use the limit
+\[ \lim_{n\to\infty} \frac{f(n)}{g(n)} \]
++ If result is 0 then growth of $g(n)$ > growth of $f(n)$
++ If result is $\infty$ then growth of $g(n)$ < growth of $f(n)$
++ If result is any finite number (constant), then growth of $g(n)$ = growth of $f(n)$
+*Note* : L'Hôpital's rule can be used in this limit.
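+
+As an illustrative example (added, not from the lecture), compare $f(n) = n^2$ and $g(n) = 2^n$. Applying L'Hôpital's rule twice,
+\[ \lim_{n\to\infty} \frac{n^2}{2^n} = \lim_{n\to\infty} \frac{2n}{2^n . ln(2)} = \lim_{n\to\infty} \frac{2}{2^n . (ln(2))^2} = 0 \]
+Since the result is 0, growth of $2^n$ > growth of $n^2$.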
+
+** Using logarithm
+Using logarithms can be useful to compare exponential functions. When comparing functions $f(n)$ and $g(n)$,
++ If growth of $\log(f(n))$ is greater than growth of $\log(g(n))$, then growth of $f(n)$ is greater than growth of $g(n)$
++ If growth of $\log(f(n))$ is less than growth of $\log(g(n))$, then growth of $f(n)$ is less than growth of $g(n)$
++ When using log for comparing growth, comparing the constants after applying log is also required. For example, if the functions are $2^n$ and $3^n$, then their logs are $n.log(2)$ and $n.log(3)$. Since $log(2) < log(3)$, the growth rate of $3^n$ will be higher.
++ If the growth rates are equal after applying log, we can't decide which function grows faster.
+
+** Common functions
+Commonly, growth rate in increasing order is
+\[  c < c.log(log(n)) < c.log(n) < c.n < n.log(n) < c.n^2 < c.n^3 < c.n^4 ...  \]
+\[ n^c < c^n < n! < n^n  \]
+Where $c$ is any constant.
+
+* Properties of Asymptotic Notations
+
+** Big-Oh
++ *Product* :  \[ \text{Given } f_1 = O(g_1) \text{ and } f_2 = O(g_2) \implies f_1 f_2 = O(g_1 g_2) \] \[ \text{Also, } f.O(g) = O(f g) \]
+
++ *Sum* : For a sum of functions, the big-oh can be represented by only the function having the highest growth rate. \[ O(f_1 + f_2 + ... + f_i) = O(\text{max growth rate}(f_1, f_2, ...., f_i)) \]
+
++ *Constants* : For a constant $c$ \[ O(c.g(n)) = O(g(n)) \] This is because constants don't affect the growth rate.
+
+** Properties
+# Taken from https://www.youtube.com/watch?v=pmGau4xHjFM&ab_channel=UnacademyComputerScience (Analysis of an Algorithm - 2 | L 2 | Algorithms | Infinity Batch | GATE 2022 CS/IT | Ankush Sir)
+[[file:./imgs/asymptotic-notations-properties.png]]
+
++ *Reflexive* :  $f(n) = O(f(n))$ and $f(n) = \Omega (f(n))$ and $f(n) = \theta (f(n))$
++ *Symmetric* : If $f(n) = \theta (g(n))$ then $g(n) = \theta (f(n))$
++ *Transitive* : If $f(n) = O(g(n))$ and $g(n) = O(h(n))$ then $f(n) = O(h(n))$
++ *Transpose* : If $f(n) = O(g(n))$ then we can also conclude that $g(n) = \Omega (f(n))$ so we say Big-Oh is transpose of Big-Omega and vice-versa.
++ *Antisymmetric* : If $f(n) = O(g(n))$ and $g(n) = O(f(n))$ then we conclude that $f(n) = \theta (g(n))$
++ *Asymmetric* : If $f(n) = \omega (g(n))$ then we can conclude that $g(n) \ne \omega (f(n))$
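+
+A small illustration of the transpose property (example added): take $f(n) = n$ and $g(n) = n^2$. Since $n \le 1.n^2$ for all $n \ge 1$, we have $f(n) = O(g(n))$, and by the transpose property $g(n) = \Omega (f(n))$, i.e., $n^2 = \Omega (n)$.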
diff --git a/lectures/3.org b/lectures/3.org
new file mode 100644
index 0000000..2c02475
--- /dev/null
+++ b/lectures/3.org
@@ -0,0 +1,144 @@
+* Calculating time complexity of algorithm
+
+We will look at three types of situations
++ Sequential instructions
++ Iterative instructions
++ Recursive instructions
+
+** Sequential instructions
+A sequential set of instructions are instructions in a sequence without iterations and recursions. It is a simple block of instructions with no branches. A sequential set of instructions has *time complexity of O(1)*, i.e., it has *constant time complexity*.
+
+** Iterative instructions
+A set of instructions in a loop. Iterative instructions can have different complexities based on how many iterations occur, which depends on the input size.
+
++ For a fixed number of iterations (the number of iterations is known at compile time, i.e., independent of the input size), the time complexity is constant, O(1). Example: for(int i = 0; i < 100; i++) { ... } will always run 100 iterations, so the time complexity is constant.
++ For n iterations (n being the input size), the time complexity is O(n). Example: a loop for(int i = 0; i < n; i++){ ... } will have n iterations where n is the input size, so the complexity is O(n). The loop for(int i = 0; i < n/2; i++){...} also has time complexity O(n) because the loop does n/2 iterations and the constant factor 1/2 is dropped in big-oh notation.
++ For a loop like for(int i = 1; i <= n; i = i*2){...} the value of i is updated as i *= 2, so the number of iterations will be $log_2 (n)$. Therefore, the time complexity is $O(log_2 (n))$.
++ For a loop like for(int i = n; i > 1; i = i/2){...} the value of i is updated as i /= 2, so the number of iterations will be $log_2 (n)$. Therefore, the time complexity is $O(log_2 (n))$ (see the small sketch below).
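+
+As a quick sanity check (a small sketch added here; the helper name and the value n = 1024 are only for illustration), we can count the iterations of the halving loop and compare the count against $log_2 (n)$.
+
+#+BEGIN_SRC C
+  #include <stdio.h>
+  #include <math.h>
+
+  /* Counts how many times the loop body runs when i is halved each iteration. */
+  int count_halving_iterations(int n){
+    int count = 0;
+    for(int i = n; i > 1; i = i / 2)
+      count++;
+    return count;
+  }
+
+  int main(void){
+    int n = 1024;
+    /* For n = 1024 this prints 10 iterations, which equals log2(1024). */
+    printf("iterations = %d, log2(n) = %d\n",
+           count_halving_iterations(n), (int)log2(n));
+    return 0;
+  }
+#+END_SRC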
+
+*_Nested Loops_*
+\\
++ If *inner loop iterator doesn't depend on outer loop*, the complexity of the inner loop is multiplied by the number of times outer loop runs to get the time complexity For example, suppose we have loop as 
+
+#+BEGIN_SRC
+for(int i = 0; i < n; i++){
+  ...
+  for(int j = 0; j < n; j *= 2){
+    ...
+  }
+  ...
+}
+#+END_SRC
+
+Here, the outer loop will run *n* times and the inner loop will run *log(n)* times. Therefore, the statements in the inner loop run a total of n.log(n) times.
+Thus the time complexity is *O(n.log(n))*.
+
++ If *inner loop and outer loop are related*, then the complexity has to be computed using sums. For example, consider the loop
+
+#+BEGIN_SRC
+for(int i = 0; i <= n; i++){
+  ...
+  for(int j = 0; j <= i; j++){
+    ...
+  }
+  ...
+}
+#+END_SRC
+
+Here the outer loop runs with i going from *0 to n*. The number of times the inner loop runs depends on *i*.
+
+#+ATTR_HTML: :frame border :rules all
+| Value of i | Number of times inner loop runs |
+|------------+---------------------------------|
+| 0          | 0                               |
+| 1          | 1                               |
+| 2          | 2                               |
+| .          | .                               |
+| .          | .                               |
+| .          | .                               |
+| n          | n                               |
+|------------+---------------------------------|
+
+So the total number of times inner loop runs = $1+2+3+....+n$
+\\
+total number of times inner loop runs = $\frac{n.(n+1)}{2}$
+\\
+total number of times inner loop runs = $\frac{n^2}{2} + \frac{n}{2}$
+\\
+*/Therefore, time complexity is/* $O(\frac{n^2}{2} + \frac{n}{2}) = O(n^2)$
+\\
+*Another example,*
+\\
+Suppose we have loop
+#+BEGIN_SRC
+for(int i = 1; i <= n; i++){
+  ...
+  for(int j = 1; j <= i; j *= 2){
+    ...
+  }
+  ...
+}
+#+END_SRC
+
+The outer loop will run n times with i from *1 to n*, and inner will run log(i) times.
+
+#+ATTR_HTML: :frame border :rules all
+| Value of i | Number of times inner loop runs |
+|------------+---------------------------------|
+| 1          | log(1)                          |
+| 2          | log(2)                          |
+| 3          | log(3)                          |
+| .          | .                               |
+| .          | .                               |
+| .          | .                               |
+| n          | log(n)                          |
+|------------+---------------------------------|
+
+Thus, total number of times the inner loop runs is $log(1) + log(2) + log(3) + ... + log(n)$.
+\\
+total number of times inner loop runs = $log(1.2.3...n)$
+\\
+total number of times inner loop runs = $log(n!)$
+\\
+Using */Stirling's approximation/*, we know that $log(n!) \approx n.log(n) - n + 1$
+\\
+total number of times inner loop runs = $n.log(n) - n + 1$
+\\
+Time complexity = $O(n.log(n))$
+
+** An example for time complexities of nested loops
+Suppose a loop,
+#+BEGIN_SRC
+for(int i = 1; i <= n; i *= 2){
+  ...
+  for(int j = 1; j <= i; j *= 2){
+    ...
+  }
+  ...
+}
+#+END_SRC
+Here, the outer loop will run *log(n)* times. Let's say that for a given n it runs *k* times, i.e., let
+\[ k = log(n) \]
+
+The inner loop will run *log(i)* times, so number of loops with changing values of i is
+
+#+ATTR_HTML: :frame border :rules all
+| Value of i | Number of times inner loop runs |
+|------------+---------------------------------|
+| 1          | log(1)                          |
+| 2^1        | log(2)                          |
+| 2^2        | 2.log(2)                        |
+| 2^3        | 3.log(2)                        |
+| .          | .                               |
+| .          | .                               |
+| .          | .                               |
+| 2^{k-1}    | (k-1).log(2)                    |
+|------------+---------------------------------|
+
+So the total number of times inner loop runs is $log(1) + log(2) + 2.log(2) + 3.log(2) + ... + (k-1).log(2)$
+\[ \text{number of times inner loop runs} = log(1) + log(2).[1+2+3+...+(k-1)] \]
+\[ \text{number of times inner loop runs} = log(1) + log(2). \frac{(k-1).k}{2} \]
+\[ \text{number of times inner loop runs} = log(1) + log(2). \left( \frac{k^2}{2} - \frac{k}{2} \right) \]
+Putting value $k = log(n)$
+\[ \text{number of times inner loop runs} = log(1) + log(2). \left( \frac{log^2(n)}{2} - \frac{log(n)}{2} \right) \]
+\[ \text{Time complexity} = O(log^2(n)) \]
diff --git a/lectures/4.org b/lectures/4.org
new file mode 100644
index 0000000..68e66f4
--- /dev/null
+++ b/lectures/4.org
@@ -0,0 +1,249 @@
+* Time complexity of recursive instructions
+To get the time complexity of recursive functions/calls, we first express the time complexity itself in a recursive manner, as a recurrence relation.
+
+** Time complexity in recursive form
+We first have to create a way to describe the time complexity of recursive functions in the form of an equation:
+\[ T(n) = ( \text{Recursive calls by the function} ) + ( \text{Time taken per call, i.e, the time taken except for recursive calls in the function} ) \]
+
++ Example, suppose we have a recursive function 
+
+#+BEGIN_SRC c
+int fact(int n){
+  if(n == 0 || n == 1)
+    return 1;
+  else
+    return n * fact(n-1);
+}
+#+END_SRC
+
+In this example, the recursive call is fact(n-1), therefore the time complexity of the recursive call is T(n-1), and the time taken by the function apart from the recursive call is constant (let's assume *c*). So the time complexity is
+\[ T(n) = T(n-1) + c \]
+\[ T(1) = T(0) = C\ \text{where C is constant time} \]
++ Another example,
+
+#+BEGIN_SRC c
+int func(int n){
+  if(n == 0 || n == 1)
+    return 1;
+  else
+    return func(n - 1) * func(n - 2);
+}
+#+END_SRC
+
+Here, the recursive calls are func(n-1) and func(n-2), therefore the time complexities of the recursive calls are T(n-1) and T(n-2). The time complexity of the function except for the recursive calls is constant (let's assume *c*), so the time complexity is
+\[ T(n) = T(n-1) + T(n-2) + c \]
+\[ T(1) = T(0) = C\ \text{where C is constant time} \]
+
++ Another example,
+
+#+BEGIN_SRC c
+int func(int n){
+  int r = 0;
+  for(int i = 0; i < n; i++)
+    r += i;
+
+  if(n == 0 || n == 1)
+    return r;
+  else
+    return r * func(n - 1) * func(n - 2);
+}
+#+END_SRC
+
+Here, the recursive calls are func(n-1) and func(n-2), therefore the time complexities of the recursive calls are T(n-1) and T(n-2). The time complexity of the function except for the recursive calls is *\theta (n)* because of the for loop, so the time complexity is
+
+\[ T(n) = T(n-1) + T(n-2) + n \]
+\[ T(1) = T(0) = C\ \text{where C is constant time} \]
+
+
+* Solving Recursive time complexities
+** Iterative method
++ Take for example,
+\[ T(1) = T(0) = C\ \text{where C is constant time} \]
+\[ T(n) = T(n-1) + c \]
+
+We can expand T(n-1).
+\[ T(n) = [ T(n - 2) + c ] + c \]
+\[ T(n) = T(n-2) + 2.c \]
+Then we can expand T(n-2)
+\[ T(n) =  [ T(n - 3) + c ] + 2.c \]
+\[ T(n) =  T(n - 3) + 3.c \]
+
+So, if we expand it k times, we will get
+
+\[ T(n) = T(n - k) + k.c \]
+Since we know this recursion *ends at T(1)*, let's put $n-k=1$.
+Therefore, $k = n-1$.
+\[ T(n) = T(1) + (n-1).c \]
+
+Since T(1) = C
+\[ T(n) = C + (n-1).c \]
+So time complexity is,
+\[ T(n) = O(n) \]
+
++ Another example, 
+\[ T(1) = C\ \text{where C is constant time} \]
+\[ T(n) = T(n-1) + n \]
+
+Expanding T(n-1),
+\[ T(n) = [ T(n-2) + n - 1 ] + n \]
+\[ T(n) = T(n-2) + 2.n - 1 \]
+
+Expanding T(n-2),
+\[ T(n) = [ T(n-3) + n - 2 ] + 2.n - 1 \]
+\[ T(n) = T(n-3) + 3.n  - 1  - 2 \]
+
+Expanding T(n-3),
+\[ T(n) = [ T(n-4) + n - 3 ] + 3.n  - 1 - 2 \]
+\[ T(n) = T(n-4) + 4.n  - 1 - 2 - 3  \]
+
+So expanding till T(n-k)
+\[ T(n) = T(n-k) + k.n - [ 1 + 2 + 3 + .... + (k-1) ] \]
+\[ T(n) = T(n-k) + k.n - \frac{(k-1).k}{2} \]
+
+Putting $n-k=1$. Therefore, $k=n-1$.
+\[ T(n) = T(1) + (n-1).n - \frac{(n-2).(n-1)}{2} \]
+\[ T(n) = C + n^2 - n - \frac{n^2 - 3n + 2}{2} = C + \frac{n^2}{2} + \frac{n}{2} - 1 \]
+
+Time complexity is
+\[ T(n) = O(n^2) \]
+** Master Theorem for Subtract recurrences
+
+For recurrence relation of type
+
+\[ T(n) = c\ for\ n \le 1 \]
+\[ T(n) = a.T(n-b) + f(n)\ for\ n > 1 \]
+\[ \text{where for f(n) we can say, } f(n) = O(n^k) \]
+\[ \text{where, a > 0, b > 0 and k}  \ge 0  \]
+
++ If a < 1, then T(n) = O(n^k)
++ If a = 1, then T(n) = O(n^{k+1})
++ If a > 1, then T(n) = O(n^k . a^{n/b})
+
+Example, \[ T(n) = 3T(n-1) + n^2 \]
+Here, f(n) = O(n^2), therefore k = 2,
+\\ 
+Also, a = 3 and b = 1
+\\
+Since a > 1, $T(n) = O(n^2 . 3^n)$
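+
+As a quick cross-check (example added), the relation solved earlier by the iterative method fits this form too:
+\[ T(n) = T(n-1) + n \]
+Here a = 1, b = 1 and f(n) = O(n^1), so k = 1. Since a = 1, $T(n) = O(n^{k+1}) = O(n^2)$, matching the iterative-method result above.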
+
+** Master Theorem for divide and conquer recurrences
+\[ T(n) = aT(n/b) + f(n).(log(n))^k \]
+\[ \text{here, f(n) is a polynomial function} \]
+\[ \text{and, a > 0, b > 0 and k } \ge 0 \]
+We calculate a value $n^{log_ba}$
+
++ If $\theta (f(n)) < \theta ( n^{log_ba} )$ then $T(n) = \theta (n^{log_ba})$
++ If $\theta (f(n)) > \theta ( n^{log_ba} )$ then $T(n) = \theta (f(n).(log(n))^k )$
++ If $\theta (f(n)) = \theta ( n^{log_ba} )$ then $T(n) = \theta (f(n) . (log(n))^{k+1})$
+For the above comparison, we say a higher growth rate is greater than a slower growth rate. E.g., \theta (n^2) > \theta (n).
+
+Example, calculating complexity for
+\[ T(n) = T(n/2) + 1 \]
+Here, f(n) = 1
+\\
+Also, a = 1, b = 2 and k = 0.
+\\
+Calculating n^{log_ba} = n^{log_21} = n^0 = 1
+\\
+Therefore, \theta (f(n)) = \theta (n^{log_ba})
+\\
+So time complexity is 
+\[ T(n) = \theta ( 1 . (log(n))^{0 + 1} ) \]
+\[ T(n) = \theta (log(n)) \]
+
+Another example, calculate complexity for
+\[ T(n) = 2T(n/2) + nlog(n) \]
+
+Here, f(n) = n
+\\
+Also, a = 2, b = 2 and k = 1
+\\
+Calculating n^{log_ba} = n^{log_22} = n
+\\
+Therefore, \theta (f(n)) = \theta (n^{log_ba})
+\\
+So time complexity is,
+\[ T(n) = \theta ( n . (log(n))^{2}) \]
+
+
+* Square root recurrence relations
+
+** Iterative method
+Example, 
+\[ T(n) = T( \sqrt{n} ) + 1 \]
+we can write this as,
+\[ T(n) = T( n^{1/2}) + 1 \]
+Now, we expand $T( n^{1/2})$
+\[ T(n) = [ T(n^{1/4}) + 1 ] + 1 \]
+\[ T(n) = T(n^{1/(2^2)}) + 1 + 1 \]
+Expand, $T(n^{1/4})$
+\[ T(n) = [ T(n^{1/8}) + 1 ] + 1 + 1 \]
+\[ T(n) =  T(n^{1/(2^3)}) + 1  + 1 + 1 \]
+
+Expanding *k* times,
+\[ T(n) =  T(n^{1/(2^k)}) + 1  + 1 ... \text{k times } + 1 \]
+\[ T(n) =  T(n^{1/(2^k)}) + k \]
+
+Let's consider $T(2)=C$ where C is constant.
+\\
+Putting $n^{1/(2^k)} = 2$
+\[ \frac{1}{2^k} log(n) = log(2) \]
+\[ \frac{1}{2^k} = \frac{log(2)}{log(n)} \]
+\[ 2^k = \frac{log(n)}{log(2)} \]
+\[ 2^k = log_2n \]
+\[ k = log(log(n)) \]
+
+So putting *k* in time complexity equation,
+\[ T(n) = T(2) + log(log(n)) \]
+\[ T(n) = C + log(log(n)) \]
+Time complexity is,
+\[ T(n) = \theta (log(log(n))) \]
+
+** Master Theorem for square root recurrence relations
+
+For recurrence relations with a square root, we first need to convert the recurrence relation to a form on which we can use the master's theorem. Example,
+\[ T(n) = T( \sqrt{n} ) + 1 \]
+Here, we need to convert $T( \sqrt{n} )$ , we can do that by *substituting* 
+\[ \text{Substitute } n = 2^m \]
+\[ T(2^m) = T ( \sqrt{2^m} ) + 1 \]
+\[ T(2^m) = T ( 2^{m/2} ) + 1 \]
+
+Now, we need to consider a new function such that,
+\[ \text{Let, } S(m) = T(2^m) \]
+Thus our recurrence relation will become,
+\[ S(m) = S(m/2) + 1 \]
+Now, we can apply the master's theorem.
+\\
+Here, f(m) = 1
+\\
+Also, a = 1, and b = 2 and k = 0
+\\
+Calculating m^{log_ba} = m^{log_21} = m^0 = 1
+\\
+Therefore, \theta (f(m)) = \theta ( m^{log_ba} )
+\\
+So by master's theorem,
+\[ S(m) = \theta (1. (log(m))^{0+1} ) \]
+\[ S(m) = \theta (log(m) ) \]
+Now, putting back $m = log(n)$
+\[ T(n) = \theta (log(log(n))) \]
+Another example,
+\[ T(n) = 2.T(\sqrt{n})+log(n) \]
+Substituting $n = 2^m$
+\[ T(2^m) = 2.T(\sqrt{2^m}) + log(2^m) \]
+\[ T(2^m) = 2.T(2^{m/2}) + m \]
+Consider a function $S(m) = T(2^m)$
+\[ S(m) = 2.S(m/2) + m \]
+Here, f(m) = m
+\\
+Also, a = 2, b = 2 and k = 0
+\\
+Calculating m^{log_ba} = m^{log_22} = m
+\\
+Therefore, \theta (f(m)) = \theta (m^{log_ba})
+\\
+Using master's theorem,
+\[ S(m) = \theta (m.(log(m))^{0+1} ) \]
+\[ S(m) = \theta (m.log(m)) \]
+Putting value of m,
+\[ T(n) = \theta (log(n).log(log(n))) \]
diff --git a/lectures/5.org b/lectures/5.org
new file mode 100644
index 0000000..06ed92d
--- /dev/null
+++ b/lectures/5.org
@@ -0,0 +1,253 @@
+* Extended Master's theorem for time complexity of recursive algorithms
+** For (k = -1)
+\[ T(n) = aT(n/b) + f(n).(log(n))^{-1} \]
+\[ \text{Here, } f(n) \text{ is a polynomial function} \]
+\[ a > 0\ and\ b > 1 \]
+
++ If \theta (f(n)) < \theta ( n^{log_ba} ) then, T(n) = \theta (n^{log_ba})
++ If \theta (f(n)) > \theta ( n^{log_ba} ) then, T(n) = \theta (f(n))
++ If \theta (f(n)) = \theta ( n^{log_ba} ) then, T(n) = \theta (f(n).log(log(n)))
+
+** For (k < -1)
+\[ T(n) = aT(n/b) + f(n).(log(n))^{k} \]
+\[ \text{Here, } f(n) \text{ is a polynomial function} \]
+\[ a > 0\ and\ b > 1\ and\ k < -1 \]
+
++ If \theta (f(n)) < \theta ( n^{log_ba} ) then, T(n) = \theta (n^{log_ba})
++ If \theta (f(n)) > \theta ( n^{log_ba} ) then, T(n) = \theta (f(n))
++ If \theta (f(n)) = \theta ( n^{log_ba} ) then, T(n) = \theta (n^{log_ba})
+
+* Tree method for time complexity of recursive algorithms
+The tree method is used when there are multiple recursive calls in our recurrence relation. Example,
+\[ T(n) = T(n/5) + T(4n/5) + f(n) \]
+Here, one call is T(n/5) and another is T(4n/5). So we can't apply master's theorem. So we create a tree of recursive calls which is used to calculate time complexity.
+The first node, i.e., the root node, is T(n), and the tree is formed by the child nodes being the calls made by the parent nodes. Example, let's consider the recurrence relation
+\[ T(n) = T(n/5) + T(4n/5) + f(n) \]
+
+#+BEGIN_SRC
+      +-----T(n/5)
+T(n)--+
+      +-----T(4n/5)
+#+END_SRC
+
+Since T(n) calls T(n/5) and T(4n/5), the tree for that is drawn as shown above. Now using the recurrence relation, we can say that T(n/5) will call T(n/5^2) and T(4n/5^2). Also, T(4n/5) will call T(4n/5^2) and T(4^2 n/ 5^2).
+
+#+BEGIN_SRC
+                    +--T(n/5^2)
+      +-----T(n/5)--+
+      +             +--T(4n/5^2)
+T(n)--+
+      +             +--T(4n/5^2)
+      +-----T(4n/5)-+
+                    +--T(4^2 n/5^2)
+#+END_SRC
+
+Suppose we draw this graph for an unknown number of levels.
+
+#+BEGIN_SRC
+                    +--T(n/5^2)- - - - - - -  etc.
+      +-----T(n/5)--+
+      +             +--T(4n/5^2) - - - - - - - - - etc.
+T(n)--+
+      +             +--T(4n/5^2) - - - - - -  - - - etc.
+      +-----T(4n/5)-+
+                    +--T(4^2 n/5^2)- - - - - - etc.
+#+END_SRC
+
+We will now replace the T()'s with the *cost of the call*. The cost of the call is *f(n)*, i.e., the time taken other than that caused by the recursive calls.
+
+#+BEGIN_SRC
+                    +--f(n/5^2)- - - - - - -  etc.
+      +-----f(n/5)--+
+      +             +--f(4n/5^2) - - - - - - - - - etc.
+f(n)--+
+      +             +--f(4n/5^2) - - - - - -  - - - etc.
+      +-----f(4n/5)-+
+                    +--f(4^2 n/5^2)- - - - - - etc.
+#+END_SRC
+
+In our example, *let's assume f(n) = n*, therefore,
+
+#+BEGIN_SRC
+                    +--  n/5^2 - - - - - - -  etc.
+      +-----  n/5 --+
+      +             +-- 4n/5^2  - - - - - - - - - etc.
+  n --+
+      +             +--  4n/5^2  - - - - - -  - - -etc.
+      +-----  4n/5 -+
+                    +--  4^2 n/5^2 - - - - - -  etc.
+#+END_SRC
+
+Now we can get cost of each level.
+
+#+BEGIN_SRC
+                           +--  n/5^2 - - - - - - -  etc.
+             +-----  n/5 --+
+             +             +-- 4n/5^2  - - - - - - - - - etc.
+         n --+
+             +             +--  4n/5^2  - - - - - -  - - -etc.
+             +----- 4n/5 --+
+                           +--  4^2 n/5^2 - - - - - -  etc.
+
+       
+Sum :    n         n/5         n/25                      
+                  +4n/5       +4n/25
+                              +4n/25
+                              +16n/25
+       .....      .....       ......
+         n          n           n
+#+END_SRC
+
+Since the sum on every level is n, we can say that the total time taken is
+\[ T(n) = \Sigma \ (cost\ of\ level_i) \]
+
+Now we need to find the longest branch in the tree. If we follow the pattern of expanding the tree in a sequence as shown, then the longest branch is *always on one of the extreme ends of the tree*. So for our example, if the tree has *(k+1)* levels, then our branch is either (n/5^k) or (4^k n/5^k). Consider that the terminating condition is $T(a) = C$. Then we calculate the value of k by equating the longest branch as,
+\[ \frac{n}{5^k} = a \]
+\[ k = log_5 (n/a) \]
+Also,
+\[ \frac{4^k n}{5^k} = a \]
+\[ k = log_{5/4} (n/a) \]
+
+So, we have two possible values of k, 
+\[ k = log_{5/4}(n/a),\ log_5 (n/a) \]
+
+Now, we can say that, 
+\[ T(n) = \sum_{i=1}^{k+1} \ (cost\ of\ level_i) \]
+Since in our example, cost of every level is *n*.
+\[ T(n) = n.(k+1) \]
+Putting values of k,
+\[ T(n) = n.(log_{5/4}(n/a) + 1) \]
+or
+\[ T(n) = n.(log_{5}(n/a) + 1) \]
+
+Of the two possible time complexities, we consider the one with higher growth rate in the big-oh notation.
+
+** Avoiding tree method
+The tree method, as mentioned, is mainly used when we have multiple recursive calls with different factors. But when using the big-oh notation (O), we can avoid the tree method in favour of the master's theorem by converting the recursive call with the smaller factor into the larger one. This works because big-oh gives an upper bound. Let's take our previous example
+\[ T(n) = T(n/5) + T(4n/5) + f(n) \]
+Since T(n) is an increasing function, we can say that
+\[ T(n/5) < T(4n/5)  \]
+So we can replace the smaller call with the larger one and approximate our equation as,
+\[ T(n) = T(4n/5) + T(4n/5) + f(n) \]
+\[ T(n) = 2.T(4n/5) + f(n) \]
+
+Now, our recurrence relation is in a form where we can apply the master's theorem.
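+
+As a worked continuation (added; it assumes the same cost f(n) = n that was used in the tree-method example above), the master's theorem applies with a = 2, b = 5/4, k = 0 and f(n) = n:
+\[ T(n) = 2.T(4n/5) + n \]
+Calculating $n^{log_ba} = n^{log_{5/4} 2} \approx n^{3.1}$, we get $\theta (f(n)) < \theta (n^{log_{5/4} 2})$, therefore
+\[ T(n) = \theta ( n^{log_{5/4} 2} ) \]
+Note that this is only an upper-bound style estimate for the original recurrence; the tree method above gives the tighter $\theta (n.log(n))$.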
+
+* Space complexity
+The amount of memory used by the algorithm to execute and produce the result for a given input size is space complexity. Similar to time complexity, when comparing two algorithms space complexity is usually represented as the growth rate of memory used with respect to input size. The space complexity includes
++ *Input space* : The amount of memory used by the inputs to the algorithm.
++ *Auxiliary space* : The amount of memory used during the execution of the algorithm, excluding the input space.
+
+*NOTE* : /Space complexity by definition includes both input space and auxiliary space, but when comparing algorithms the input space is often ignored. This is because two algorithms that solve the same problem will have the same input space for a given input size (Example, when comparing two sorting algorithms, the input space will be the same because both get a list as input). So from this point on, when referring to space complexity, we are actually talking about *Auxiliary Space Complexity*, which is space complexity but only considering the auxiliary space/.
+
+** Auxiliary space complexity
+
+The space complexity when we disregard the input space is the auxiliary space complexity, so we basically treat the algorithm as if its input space is zero. Auxiliary space complexity is more useful when comparing algorithms because algorithms working towards the same result will have the same input space. For example, sorting algorithms all have the input space of the list, so it is not a metric we can use to compare algorithms. So from here on, when we calculate space complexity, we are trying to calculate the auxiliary space complexity and sometimes just refer to it as space complexity.
+
+* Calculating auxiliary space complexity
+There are two parameters that affect space complexity,
++ *Data space* : The memory taken by the variables in the algorithm. So allocating new memory during runtime of the algorithm is what forms the data space. The space which was allocated for the input space is not considered a part of the data space.
++ *Code Execution Space* : The memory taken by the instructions themselves is called code execution space. Unless we have recursion, the code execution space remains constant since the instructions don't change during the runtime of the algorithm. When using recursion, every active call needs its own space on the call stack, thus increasing the code execution space.
+
+** Data Space used
+The data space used by the algorithm depends on what data structures it uses to solve the problem. Example,
+
+#+BEGIN_SRC C
+  /* Input size of n */
+  void algorithms(int n){
+    /* Creating an array whose size depends on the input size */
+    int data[n];
+
+    for(int i = 0; i < n; i++){
+      int x = data[i];
+      // Work on data
+    }
+  }
+#+END_SRC
+
+Here, we create an array of size *n*, so the allocated space increases linearly with the input size. So the space complexity is *$\theta (n)$*.
+\\
++ Another example,
+
+#+BEGIN_SRC C
+  /* Input size of n */
+  void algorithms(int n){
+    /* Creating an n*n matrix whose size depends on the input size */
+    int data[n][n];
+
+    for(int i = 0; i < n; i++){
+      for(int j = 0; j < n; j++){
+	int x = data[i][j];
+	// Work on data
+      }
+    }
+  }
+#+END_SRC
+
+Here, we create a matrix of size n*n, so the allocated space grows as $n^2$ with the input size. So the space complexity is *$\theta (n^2)$*.
+
++ If we use a node-based data structure like a linked list or a tree, then we can express the space complexity as the number of nodes used by the algorithm for a given input size (if all nodes are of equal size).
++ Space complexity of the hash map is considered *O(n)* where *n* is the number of entries in the hash map.
+
+** Code Execution space in recursive algorithm
+When we use recursion, the function calls are stored on the stack. This means that the code execution space will increase. A single function call takes a fixed (constant) amount of memory. So to get the space complexity, *we need to know how many function calls occur in the longest branch of the function call tree*.
+
++ *NOTE* : Space complexity *only depends on the longest branch* of the function calls tree.
++ /*The tree is made the same way we make it in the tree method for calculating time complexity of recursive algorithms*/
+  
+This is because at any given time, the stack will store only a single branch.
+
++ Example,
+
+#+BEGIN_SRC C
+  int func(int n){
+    if(n == 1 || n == 0)
+      return 1;
+    else
+      return n * func(n - 1);
+  }
+#+END_SRC
+
+To calculate space complexity we can use the tree method. But unlike when calculating time complexity, we count the number of function calls using the tree.
+We will do this by drawing a tree of the function calls for a given input size *n*.
+\\
+The tree for *k+1* levels is,
+
+#+BEGIN_SRC
+  func(n)--func(n-1)--func(n-2)--.....--func(n-k)
+#+END_SRC
+
+This tree only has a single branch. To get the number of levels for a branch, we put the terminating condition at the extreme branches of the tree. Here, the terminating condition is func(1), therefore, we will put $func(1) = func(n-k)$, i.e,
+\[ 1 = n - k \]
+\[ k + 1 = n \]
+
+So the number of levels is $n$. Therefore, space complexity is *$\theta (n)$*
+
++ Another example,
+
+#+BEGIN_SRC c
+  void func(int n){
+    if(n/2 <= 1)
+      return;
+    func(n/2);
+    func(n/2);
+  }
+#+END_SRC
+
+Drawing the tree for *k+1* levels.
+#+BEGIN_SRC
+                          +--func(n/2^2)- - - - - - -  func(n/2^k)
+         +-----func(n/2)--+
+         +                +--func(n/2^2) - - - - - - - - - func(n/2^k)
+func(n)--+
+         +               +--func(n/2^2) - - - - - -  - - - func(n/2^k)
+         +-----func(n/2)-+
+                         +--func(n/2^2)- - - - - - func(n/2^k)
+#+END_SRC
+
++ /*As we know from the tree method, the two extreme branches of the tree will always be the longest ones.*/
+
+Both the extreme branches have the same call which here is func(n/2^k). To get the number of levels for a branch, we put the terminating condition at the extreme branches of the tree. Here, the terminating condition is func(2), therefore, we will put $func(2) = func(n/2^k)$, i.e,
+\[ 2 = \frac{n}{2^k} \]
+\[ k + 1 = log_2n \]
+Number of levels is $log_2n$. Therefore, space complexity is *$\theta (log_2n)$.*
diff --git a/lectures/6.org b/lectures/6.org
new file mode 100644
index 0000000..e70a236
--- /dev/null
+++ b/lectures/6.org
@@ -0,0 +1,253 @@
+* Divide and Conquer algorithms
+Divide and conquer is a problem solving strategy. In divide and conquer algorithms, we solve the problem recursively by applying three steps :
++ *Divide* : The problem is divided into smaller subproblems that are instances of the same problem.
++ *Conquer* : If the subproblems are large, divide and solve them recursively. If a subproblem is small enough, solve it in a straightforward manner.
++ *Combine* : Combine the solutions of the subproblems into the solution for the original problem.
+
+*Example*,
+1. Binary search
+2. Quick sort
+3. Merge sort
+4. Strassen's matrix multiplication
+
+* Searching for element in array
+** Straightforward approach for searching (*Linear Search*)
+
+#+BEGIN_SRC C
+  int linear_search(int *array, int n, int x){
+    for(int i = 0; i < n; i++){
+      if(array[i] == x){
+	printf("Found at index : %d", i);
+	return i;
+      }
+    }
+    return -1;
+  }
+#+END_SRC
+
+Recursive approach
+
+#+BEGIN_SRC python
+  # call this function with index = 0
+  def linear_search(array, item, index):
+      if index >= len(array):
+	  return -1
+      elif array[index] == item:
+	  return index
+      else:
+	  return linear_search(array, item, index + 1)
+#+END_SRC
+
+*Recursive time complexity* : $T(n) = T(n-1) + 1$
+
++ *Best Case* : The element to search is the first element of the array. So we need to do a single comparison. Therefore, the time complexity will be constant, *O(1)*.
+\\
++ *Worst Case* : The element to search is the last element of the array. So we need to do *n* comparisons for an array of size n. Therefore, the time complexity is *O(n)*.
+\\
++ *Average Case* : For calculating the average case, we need to consider the average number of comparisons done over all possible cases.
+
+#+ATTR_HTML: :frame border :rules all
+| Position of element to search (x) | Number of comparisons done |
+|-----------------------------------+-----------------------------|
+| 0                                 | 1                           |
+| 1                                 | 2                           |
+| 2                                 | 3                           |
+| .                                 | .                           |
+| .                                 | .                           |
+| .                                 | .                           |
+| n-1                               | n                           |
+| ....................              | ....................        |
+| Sum                               | $\frac{n(n+1)}{2}$          |
+|-----------------------------------+-----------------------------|
+
+\[ \text{Average number of comparisons} = \frac{ \text{Sum of the number of comparisons over all cases} }{ \text{Total number of cases} } \]
+\[ \text{Average number of comparisons} = \frac{n(n+1)}{2} \div n \]
+\[ \text{Average number of comparisons} = \frac{n+1}{2} \]
+\[ \text{Time complexity in average case} = O(n) \]
+
+** Divide and conquer approach (*Binary search*)
+
+The binary search algorithm works on an array which is sorted. In this algorithm we:
+1. Check the middle element of the array, return the index if element found.
+2. If element > array[mid], then our element is in the right part of the array, else it is in the left part of the array.
+3. Get the mid element of the left/right sub-array
+4. Repeat this process of dividing into subarrays and comparing the middle element till the required element is found.
+
+The divide and conquer algorithm works as,
+\\
+Suppose the function is binarySearch(array, left, right, key), where left and right are the indices of the left and right ends of the subarray, and key is the element we have to search for.
++ *Divide part* : calculate mid index as mid = left + (right - left) /2 or (left + right) / 2. If array[mid] == key, return the value of mid.
++ *Conquer part* : if array[mid] > key, then key must not be in right half. So we search for key in left half, so we will recursively call binarySearch(array, left, mid - 1, key). Similarly, if array[mid] < key, then key must not be in left half. So we search for key in right half, so recursively call binarySearch(array, mid + 1, right, key).
++ *Combine part* : Since the binarySearch function will either return -1 or the index of the key, there is no need to combine the solutions of the subproblems.
+
+[[./imgs/binary-search.jpg]]
+
+#+BEGIN_SRC C
+  int binary_search(int *array, int n, int x){
+    int low = 0;
+    int high = n - 1;
+
+    int mid = (low + high) / 2;
+
+    while(low <= high){
+      mid = (low + high) / 2;
+      if (x == array[mid]){
+	return mid;
+      }else if (x < array[mid]){
+	low = low;
+	high = mid - 1;
+      }else{
+	low = mid + 1;
+	high = high;
+      }
+    }
+
+    return -1;
+  }
+#+END_SRC
+
+Recursive approach:
+
+#+BEGIN_SRC C
+  int binary_search(int *array, int left, int right, int x){
+    if(left > right)
+      return -1;
+
+    int mid = (left + right) / 2;
+    // or we can use mid = left + (right - left) / 2, this will avoid int overflow when array has more elements.
+    
+    if (x == array[mid])
+      return mid;
+    else if (x < array[mid])
+      return binary_search(array, left, mid - 1, x);
+    else
+      return binary_search(array, mid + 1, right, x);
+  }
+#+END_SRC
+ 
+*Recursive time complexity* : $T(n) = T(n/2) + 1$
+
++ *Best Case* : Time complexity = O(1)
++ *Average Case* : Time complexity = O(log n)
++ *Worst Case* : Time complexity = O(log n)
+
+/Binary search is better for sorted arrays and linear search is better for unsorted arrays./
+\\
+/Another way to visualize binary search is using the binary tree./
+
+* Max and Min element from array
+
+** Straightforward approach
+#+BEGIN_SRC python
+  def min_max(a):
+      maximum = minimum = a[0]
+      for i in range(1, len(a)):
+          if a[i] > maximum:
+              maximum = a[i]
+          elif a[i] < minimum:
+              minimum = a[i]
+
+      return (minimum, maximum)
+#+END_SRC
+
++ *Best case* : The array is sorted in ascending order. The number of comparisons is $n-1$. Time complexity is $O(n)$.
++ *Worst case* : The array is sorted in descending order. The number of comparisons is $2.(n-1)$. Time complexity is $O(n)$.
++ *Average case* : The array can be arranged in n! ways, which makes calculating the number of comparisons in the average case hard and somewhat unnecessary, so it is skipped. Time complexity is $O(n)$
+
+** Divide and conquer approach
+Suppose the function is MinMax(array, left, right), which will return a tuple (min, max). We will divide the array in the middle, mid = (left + right) / 2. The left array will be array[left:mid] and the right array will be array[mid+1:right].
++ *Divide part* : Divide the array into a left array and a right array. If the array has only a single element, then both min and max are that single element; if the array has two elements, then compare the two: the bigger element is max and the other is min.
++ *Conquer part* : Recursively get the min and max of the left and right arrays, leftMinMax = MinMax(array, left, mid)  and rightMinMax = MinMax(array, mid + 1, right).
++ *Combine part* : If leftMinMax[0] > rightMinMax[0], then min = rightMinMax[0], else min = leftMinMax[0]. Similarly, if leftMinMax[1] > rightMinMax[1], then max = leftMinMax[1], else max = rightMinMax[1].
+
+#+BEGIN_SRC python
+  # Will return (min, max)
+  def minmax(array, left, right):
+      if left == right:       # Single element in array
+          return (array[left], array[left])
+      elif left + 1 == right: # Two elements in array
+          if array[left] > array[right]:
+              return (array[right], array[left])
+          else:
+              return (array[left], array[right])
+      else:                   # More than two elements
+          mid = (left + right) // 2  # integer division
+          leftMinMax = minmax(array, left, mid)
+          rightMinMax = minmax(array, mid + 1, right)
+
+          # Combining result of the minimum from left and right subarrays
+          if leftMinMax[0] > rightMinMax[0]:
+              minimum = rightMinMax[0]
+          else:
+              minimum = leftMinMax[0]
+
+          # Combining result of the maximum from left and right subarrays
+          if leftMinMax[1] > rightMinMax[1]:
+              maximum = leftMinMax[1]
+          else:
+              maximum = rightMinMax[1]
+
+          return (minimum, maximum)
+
+#+END_SRC
+
++ Time complexity
+We are dividing the problem into two parts of approximately equal size, and combining their results takes two comparisons (one for the minimum, one for the maximum). Let's assume a comparison takes unit time. Then the time complexity is
+\[ T(n) = T(n/2) + T(n/2) + 2 \]
+\[ T(n) = 2.T(n/2) + 2 \]
+The recurrence terminates at a single element in the array with zero comparisons, i.e., $T(1) = 0$, or at two elements with a single comparison, $T(2) = 1$.
+\\
+/Now we can use the *master's theorem* or *tree method* to solve for time complexity./
+\[ T(n) = \theta (n) \]
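+For example, with the master theorem for divide and conquer recurrences: here $a = 2$, $b = 2$, $f(n) = 2$ and $k = 0$, so $n^{log_ba} = n^{log_22} = n$, and since $\theta (f(n)) < \theta (n^{log_ba})$ we get $T(n) = \theta (n^{log_ba}) = \theta (n)$.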
+
++ Space complexity
+For space complexity, we need to find the longest branch of the recursion tree. Since both recursive calls have the same size and the reduction factor is (1/2), at level *k+1* the function call will be func(n/2^k), and the terminating condition is func(2)
+\[ func(2) = func(n/2^k) \]
+\[ 2 = \frac{n}{2^k} \]
+\[ k + 1 = log_2n \]
+Since longest branch has $log_2n$ nodes, the space complexity is $O(log_2n)$.
+
++ Number of comparisons
+In every case, i.e., best, average and worst, *the number of comparisons in this algorithm is the same*.
+\[ \text{Total number of comparisons} = \frac{3n}{2} - 2 \]
+If n is not a power of 2, we round the number of comparisons up.
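+For example, for n = 4 the recurrence gives $T(4) = 2.T(2) + 2 = 2(1) + 2 = 4$ and the formula gives $\frac{3n}{2} - 2 = 6 - 2 = 4$; for n = 8, $T(8) = 2.T(4) + 2 = 10$ and $\frac{3n}{2} - 2 = 12 - 2 = 10$.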
+
+** Efficient single loop approach (Increment by 2)
+
+In this algorithm we compare pairs of numbers from the array. It works on the idea that the larger number of the two in a pair can only be a candidate for the maximum and the smaller one can only be a candidate for the minimum. So after comparing the pair, we only test the bigger of the two against the current maximum and the smaller of the two against the current minimum. This brings the number of comparisons needed to process two numbers of the array from 4 (when we increment by 1) down to 3 (when we increment by 2).
+
+#+BEGIN_SRC python
+def min_max(array):
+    (minimum, maximum) = (array[0], array[0])
+    i = 1
+    while i < len(array):
+        if i + 1 == len(array):  # don't check i+1, it's out of bounds, break the loop after checking a[i]
+            if array[i] > maximum:
+                maximum = array[i]
+            elif array[i] < minimum:
+                minimum = array[i]
+            break
+
+        if array[i] > array[i + 1]:
+            # check possibility that array[i] is maximum and array[i+1] is minimum
+            if array[i] > maximum:
+                maximum = array[i]
+            if array[i + 1] < minimum:
+                minimum = array[i + 1]
+        else:
+            # check possibility that array[i+1] is maximum and array[i] is minimum
+            if array[i + 1] > maximum:
+                maximum = array[i + 1]
+            if array[i] < minimum:
+                minimum = array[i]
+
+        i += 2
+    return (minimum, maximum)
+#+END_SRC
+
++ Time complexity = O(n)
++ Space complexity = O(1)
++ Total number of comparisons =
+  \[ \text{If n is odd},  \frac{3(n-1)}{2} \]
+  \[ \text{If n is even}, \frac{3n}{2} - 2 \]
diff --git a/lectures/7.org b/lectures/7.org
new file mode 100644
index 0000000..88bd4c1
--- /dev/null
+++ b/lectures/7.org
@@ -0,0 +1,165 @@
+* Square matrix multiplication
+
+Matrix multiplication algorithms taken from here: 
+[[https://www.cs.mcgill.ca/~pnguyen/251F09/matrix-mult.pdf]]
+
+** Straightforward method
+
+#+BEGIN_SRC C
+  /* This will calculate A X B and store it in C. */
+  #define N 3
+
+  int main(){
+    int A[N][N] = {
+      {1,2,3},
+      {4,5,6},
+      {7,8,9} };
+
+    int B[N][N] = {
+      {10,20,30},
+      {40,50,60},
+      {70,80,90} };
+
+    int C[N][N];
+
+    for(int i = 0; i < N; i++){
+      for(int j = 0; j < N; j++){
+	C[i][j] = 0;
+	for(int k = 0; k < N; k++){
+	  C[i][j] += A[i][k] * B[k][j];
+	}
+      }
+    }
+
+    return 0;
+  }
+#+END_SRC
+
+Time complexity is $O(n^3)$
+
+** Divide and conquer approach
+The divide and conquer algorithm only works for a square matrix whose size is n X n, where n is a power of 2. The algorithm works as follows.
+
+#+BEGIN_SRC
+  MatrixMul(A, B, n):
+    If n == 2 {
+      return A X B
+    }else{
+      Break A into four parts A_11, A_12, A_21, A_22, where A = [[ A_11, A_12],
+                                                                 [ A_21, A_22]]
+  
+      Break B into four parts B_11, B_12, B_21, B_22, where B = [[ B_11, B_12],
+                                                                 [ B_21, B_22]]
+
+      C_11 = MatrixMul(A_11, B_11, n/2) + MatrixMul(A_12, B_21, n/2)
+      C_12 = MatrixMul(A_11, B_12, n/2) + MatrixMul(A_12, B_22, n/2)
+      C_21 = MatrixMul(A_21, B_11, n/2) + MatrixMul(A_22, B_21, n/2)
+      C_22 = MatrixMul(A_21, B_12, n/2) + MatrixMul(A_22, B_22, n/2)
+  
+      C = [[ C_11, C_12],
+           [ C_21, C_22]]
+  
+      return C
+    }
+#+END_SRC
+
+The addition of matrices of size (n X n) takes time $\theta (n^2)$, therefore the additions needed to compute C_11 take time $\theta \left( \left( \frac{n}{2} \right)^2 \right)$, which equals $\theta \left( \frac{n^2}{4} \right)$. Therefore, the addition time for C_11, C_12, C_21 and C_22 combined is $\theta \left( 4 \frac{n^2}{4} \right)$, which equals $\theta (n^2)$.
+\\
+There are 8 recursive calls in this function, each of the form MatrixMul(n/2), therefore the time complexity is
+\[ T(n) = 8T(n/2) + \theta (n^2) \]
+Using the *master's theorem*
+\[ T(n) = \theta (n^{log_28}) \]
+\[ T(n) = \theta (n^3) \]
+
+** Strassen's algorithm
+
+Another, more efficient divide and conquer algorithm for matrix multiplication. This algorithm also only works on square matrices with n being a power of 2. It is based on the observation that, for A X B = C, we can calculate C_11, C_12, C_21 and C_22 as,
+
+\[ C_{11} = P_5 + P_4 - P_2 + P_6 \]
+\[ C_{12} = P_1 + P_2 \]
+\[ C_{21} = P_3 + P_4 \]
+\[ C_{22} = P_1 + P_5 - P_3 - P_7 \]
+Where,
+\[ P_1 = A_{11} \times (B_{12} - B_{22}) \]
+\[ P_2 = (A_{11} + A_{12}) \times B_{22} \]
+\[ P_3 = (A_{21} + A_{22}) \times B_{11} \]
+\[ P_4 = A_{22} \times (B_{21} - B_{11}) \]
+\[ P_5 = (A_{11} + A_{22}) \times (B_{11} + B_{22}) \]
+\[ P_6 = (A_{12} - A_{22}) \times (B_{21} + B_{22}) \]
+\[ P_7 = (A_{11} - A_{21}) \times (B_{11} + B_{12}) \]
+This reduces the number of recursive calls from 8 to 7.
+
+#+BEGIN_SRC
+Strassen(A, B, n):
+  If n == 2 {
+    return A X B
+  }
+  Else{
+    Break A into four parts A_11, A_12, A_21, A_22, where A = [[ A_11, A_12],
+                                                               [ A_21, A_22]]
+  
+    Break B into four parts B_11, B_12, B_21, B_22, where B = [[ B_11, B_12],
+                                                               [ B_21, B_22]]
+    P_1 = Strassen(A_11, B_12 - B_22, n/2)
+    P_2 = Strassen(A_11 + A_12, B_22, n/2)
+    P_3 = Strassen(A_21 + A_22, B_11, n/2)
+    P_4 = Strassen(A_22, B_21 - B_11, n/2)
+    P_5 = Strassen(A_11 + A_22, B_11 + B_22, n/2)
+    P_6 = Strassen(A_12 - A_22, B_21 + B_22, n/2)
+    P_7 = Strassen(A_11 - A_21, B_11 + B_12, n/2)
+    C_11 = P_5 + P_4 - P_2 + P_6
+    C_12 = P_1 + P_2
+    C_21 = P_3 + P_4
+    C_22 = P_1 + P_5 - P_3 - P_7
+    C = [[ C_11, C_12],
+         [ C_21, C_22]]
+    return C
+  }
+#+END_SRC
+
+This algorithm uses 18 matrix addition operations. So our computation time for that is $\theta \left(18\left( \frac{n}{2} \right)^2 \right)$ which is equal to $\theta (4.5 n^2)$ which is equal to $\theta (n^2)$.
+\\
+There are 7 recursive calls in this function which are Strassen(n/2), therefore, time complexity is
+\[ T(n) = 7T(n/2) + \theta (n^2) \]
+Using the master's theorem
+\[ T(n) = \theta (n^{log_27}) \]
+\[ T(n) = \theta (n^{2.807}) \]
+
+
++ /*NOTE* : The divide and conquer approach and Strassen's algorithm typically use n == 1 as their terminating condition, since for multiplying 1 X 1 matrices we only need to calculate the product of the single element they contain; that product is then the single element of our resultant 1 X 1 matrix./
+
+* Sorting algorithms
+
+** In-place vs out-of-place sorting algorithms
+If the space complexity of a sorting algorithm is $\theta (1)$, then the algorithm is called an in-place sorting algorithm, else it is called an out-of-place sorting algorithm.
+
+** Bubble sort
+The simplest sorting algorithm, and easy to implement, so it is useful when the number of elements to sort is small. It is an in-place sorting algorithm. We compare pairs of adjacent elements from the array and swap them into the correct order. Suppose the input has n elements.
++ For the first pass of the array, we will do *n-1* comparisons between pairs: the 1st and 2nd element, then the 2nd and 3rd element, then the 3rd and 4th element, till the comparison between the (n-1)th and nth element, swapping positions according to the size. /A single pass will put a single element at the end of the list in its correct position./
++ For the second pass of the array, we will do *n-2* comparisons because the last element is already in its place after the first pass.
++ Similarly, we will continue till we only do a single comparison.
++ The total number of comparisons will be
+  \[ \text{Total comparisons} = (n - 1) + (n - 2) + (n - 3) + ..... + 2 + 1 \]
+  \[ \text{Total comparisons} = \frac{n(n-1)}{2} \]
+  Therefore, *time complexity is $\theta (n^2)$*
+
+#+BEGIN_SRC C
+  void bubble_sort(int array[], int n){
+    /* i is the number of comparisons in the pass */
+    for(int i = n - 1; i >= 1; i--){
+      /* j is used to traverse the list */
+      for(int j = 0; j < i; j++){
+        if(array[j] > array[j+1]){
+          int temp = array[j];
+          array[j] = array[j+1];
+          array[j+1] = temp;
+        }
+      }
+    }
+  }
+#+END_SRC
+
+*/The minimum number of swaps can be calculated by checking how many swap operations are needed to get each element into its correct position./* This can be done by checking the number of smaller elements towards the left. For descending, check the number of larger elements towards the left of the given element. Example for an ascending sort,
+| Array                                              | 21 | 16 | 17 | 8 | 31 | 
+| Minimum number of swaps to get in correct position |  3 |  1 |  0 | 0 |  0 |
+Therefore, minimum number of swaps is ( 3 + 1 + 0 + 0 + 0) , which is equal to 4 swaps.
+
++ */Reducing the number of comparisons in the implementation/* : at the end of every pass, check the number of swaps. *If the number of swaps in a pass is zero, then the array is sorted.* This implementation does not give the minimum number of comparisons, but it reduces the number of comparisons compared to the default implementation. It reduces the time complexity to $\theta (n)$ for the best case, since we then only need a single pass through the array (a sketch of this variant follows below).
+Recursive time complexity : $T(n)  = T(n-1) + n - 1$
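+
+A minimal sketch of this early-exit variant (the function name bubble_sort_optimized and the per-pass swaps counter are illustrative, not from the lecture):
+
+#+BEGIN_SRC C
+  /* Bubble sort that stops as soon as a full pass performs no swaps. */
+  void bubble_sort_optimized(int array[], int n){
+    for(int i = n - 1; i >= 1; i--){
+      int swaps = 0;
+      for(int j = 0; j < i; j++){
+        if(array[j] > array[j+1]){
+          int temp = array[j];
+          array[j] = array[j+1];
+          array[j+1] = temp;
+          swaps++;
+        }
+      }
+      /* No swaps in this pass means the array is already sorted. */
+      if(swaps == 0)
+        break;
+    }
+  }
+#+END_SRC
+
+On an already sorted array this does a single pass of n-1 comparisons, matching the $\theta (n)$ best case mentioned above.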
diff --git a/lectures/8.org b/lectures/8.org
new file mode 100644
index 0000000..4d5a0be
--- /dev/null
+++ b/lectures/8.org
@@ -0,0 +1,93 @@
+* Selection sort
+It is an in-place sorting technique. In this algorithm, we get the minimum element from the array and swap it into the first position. Then we get the minimum from array[1:] and place it at index 1. Similarly, we get the minimum from array[2:] and place it at index 2. We continue till we get the minimum from array[len(array) - 2:] and place it at index len(array) - 2.
+
+#+BEGIN_SRC C
+  void selection_sort(int array[], int n){
+    for( int i = 0; i < n - 1; i++ ) {
+      /* Get the index of the minimum of the sub-array [i:] */
+      int min_index = i;
+      for( int j = i+1; j < n; j++ )
+        if (array[j] < array[min_index]) { min_index = j; }
+
+      /* Swap the minimum with the element at the start of the sub-array */
+      int temp = array[i];
+      array[i] = array[min_index];
+      array[min_index] = temp;
+    }
+  }
+#+END_SRC
+
+The total number of comparisons is,
+\[ \text{Total number of comparisons} = (n -1) + (n-2) + (n-3) + ... + (1) \]
+\[ \text{Total number of comparisons} = \frac{n(n-1)}{2} \]
+For this algorithm, the number of comparisons is the same in the best, average and worst cases.
+Therefore the time complexity in all cases is, \[ \text{Time complexity} = \theta (n^2) \]
+
++ Recurrence time complexity : $T(n) = T(n-1) + n - 1$
+
+* Insertion sort
+It is an in-place sorting algorithm.
++ In this algorithm, we first divide the array into two sections. Initially, the left section has a single element and the right section has all the other elements. Therefore, the left part is sorted and the right part is unsorted.
++ We call the leftmost element of the right section the key.
++ Now, we insert the key at its correct position in the left section.
++ As is commonly known, an insertion operation requires shifting elements, so we shift elements in the left section.
+
+#+BEGIN_SRC C
+  void insertion_sort ( int array[], int n ) {
+    for( int i = 1; i < n; i++ ) {
+      /* Key is the first element of the right (unsorted) section of the array */
+      int key = array[i];
+      int j = i - 1;
+
+      /* Shift till we find the correct position of the key in the left section */
+      while ( j >= 0 && array[j] > key ) {
+        array[j + 1] = array[j];
+        j -= 1;
+      }
+      /* Insert key at its correct position */
+      array[j+1] = key;
+    }
+  }
+#+END_SRC
+
++ Time complexity
+
+*Best Case* : The best case is when the input array is already sorted. In this case, we do *(n-1)* comparisons and no shifts. The time complexity will be $\theta (n)$
+\\
+*Worst Case* : The worst case is when the input array is in descending order when we need to sort in ascending order and vice versa (basically the reverse of sorted). The number of comparisons is
+\\
+\[ [1 + 2 + 3 + .. + (n-1)] = \frac{n(n-1)}{2} \]
+\\
+The number of element shift operations is
+\\
+\[ [1 + 2 + 3 + .. + (n-1)] =  \frac{n(n-1)}{2} \]
+\\
+Total time complexity becomes $\theta \left( 2 \frac{n(n-1)}{2} \right)$, which is simplified to $\theta (n^2)$.
+
++ *NOTE* : Rather than using *linear search* to find the position of the key in the left (sorted) section, we can use *binary search* to reduce the number of comparisons. The number of shift operations stays the same, so the worst case time complexity remains $\theta (n^2)$. A sketch of this variant follows below.
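+
+A minimal sketch of this idea (the function name binary_insertion_sort is illustrative, not from the lecture): binary search finds the first position in the sorted left section whose element is greater than the key, and elements are then shifted right to make room.
+
+#+BEGIN_SRC C
+  /* Insertion sort where the position of the key is found using binary search. */
+  void binary_insertion_sort(int array[], int n){
+    for(int i = 1; i < n; i++){
+      int key = array[i];
+
+      /* Binary search in array[0..i-1] for the first element greater than key */
+      int low = 0, high = i - 1, pos = i;
+      while(low <= high){
+        int mid = (low + high) / 2;
+        if(array[mid] > key){
+          pos = mid;
+          high = mid - 1;
+        }else{
+          low = mid + 1;
+        }
+      }
+
+      /* Shift elements to the right to make room, then insert the key */
+      for(int j = i - 1; j >= pos; j--)
+        array[j + 1] = array[j];
+      array[pos] = key;
+    }
+  }
+#+END_SRC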
+
+* Inversion in array
+The number of inversions of an array is a measure of how far the array is from being sorted.
+\\
+For an ascending sort, it is the number of element pairs such that array[i] > array[j] and i < j, or in other words, array[i] < array[j] and i > j.
++ For an *ascending sort*, we can simply count, for each element, the number of smaller elements to its right (a brute-force counter is sketched below).
+  
+| Array      | 10 | 6 | 12 | 8 | 3 | 1 |
+| Inversions |  4 | 2 |  3 | 2 | 1 | 0 |
+
+Total number of inversions = (4+2+3+2+1+0) = 12
+
++ For a *descending sort*, we can simply count, for each element, the number of larger elements to its right.
+
+| Array      | 10 | 6 | 12 | 8 | 3 | 1 |
+| Inversions |  1 | 2 |  0 | 0 | 0 | 0 |
+
+Total number of inversions = 1 + 2 = 3
+
++ For an array of size *n*
+\[ \text{Maximum possible number of inversions} = \frac{n(n-1)}{2} \]
+\[ \text{Minimum possible number of inversions} = 0 \]
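+
+A brute-force sketch for counting inversions for an ascending sort (the helper name count_inversions is illustrative, not from the lecture) checks every pair (i, j) with i < j:
+
+#+BEGIN_SRC C
+  /* Count pairs (i, j) with i < j and array[i] > array[j]. Takes O(n^2) time. */
+  int count_inversions(int array[], int n){
+    int inversions = 0;
+    for(int i = 0; i < n; i++)
+      for(int j = i + 1; j < n; j++)
+        if(array[i] > array[j])
+          inversions++;
+    return inversions;
+  }
+#+END_SRC
+
+For the array (10, 6, 12, 8, 3, 1) used above this returns 12, matching the table. The count can also be computed in $O(n.log(n))$ time by modifying merge sort, but the brute-force version is enough to illustrate the idea.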
+
+** Relation between time complexity of insertion sort and inversion
+If the number of inversions of an array is f(n), then the time complexity of insertion sort will be $\theta (n + f(n))$.
+
+* Quick sort
+It is a divide and conquer technique. 
diff --git a/lectures/imgs/asymptotic-notations-properties.png b/lectures/imgs/asymptotic-notations-properties.png
new file mode 100644
index 0000000..d2f5690
Binary files /dev/null and b/lectures/imgs/asymptotic-notations-properties.png differ
diff --git a/lectures/imgs/binary-search.jpg b/lectures/imgs/binary-search.jpg
new file mode 100644
index 0000000..cab3519
Binary files /dev/null and b/lectures/imgs/binary-search.jpg differ
diff --git a/main.html b/main.html
new file mode 100644
index 0000000..493f30a
--- /dev/null
+++ b/main.html
@@ -0,0 +1,2403 @@
+
+
+
+
+
+
+
+Algorithms
+
+
+
+
+
+
+
+
+
+
+
+
+
+

Algorithms

+
+

Table of Contents

+
+ +
+
+ +
+

1. Lecture 1

+
+
+
+

1.1. Data structure and Algorithm

+
+
    +
  • A data structure is a particular way of storing and organizing data. The purpose is to effectively access and modify data effictively.
  • +
  • A procedure to solve a specific problem is called Algorithm.
  • +
+ +

+During programming we use data structures and algorithms that work on that data. +

+
+
+ +
+

1.2. Characteristics of Algorithms

+
+

+An algorithm has follwing characteristics. +

+
    +
  • Input : Zero or more quantities are externally supplied to algorithm.
  • +
  • Output : An algorithm should produce atleast one output.
  • +
  • Finiteness : The algorithm should terminate after a finite number of steps. It should not run infinitely.
  • +
  • Definiteness : Algorithm should be clear and unambiguous. All instructions of an algorithm must have a single meaning.
  • +
  • Effectiveness : Algorithm must be made using very basic and simple operations that a computer can do.
  • +
  • Language Independance : A algorithm is language independent and can be implemented in any programming language.
  • +
+
+
+ +
+

1.3. Behaviour of algorithm

+
+

+The behaviour of an algorithm is the analysis of the algorithm on basis of Time and Space. +

+
    +
  • Time complexity : Amount of time required to run the algorithm.
  • +
  • Space complexity : Amount of space (memory) required to execute the algorithm.
  • +
+ +

+The behaviour of algorithm can be used to compare two algorithms which solve the same problem. +
+The preference is traditionally/usually given to better time complexity. But we may need to give preference to better space complexity based on needs. +

+
+ +
+

1.3.1. Best, Worst and Average Cases

+
+

+The input size tells us the size of the input given to algorithm. Based on the size of input, the time/storage usage of the algorithm changes. Example, an array with larger input size (more elements) will taken more time to sort. +

+
    +
  • Best Case : The lowest time/storage usage for the given input size.
  • +
  • Worst Case : The highest time/storage usage for the given input size.
  • +
  • Average Case : The average time/storage usage for the given input size.
  • +
+
+
+ +
+

1.3.2. Bounds of algorithm

+
+

+Since algorithms are finite, they have bounded time taken and bounded space taken. Bounded is short for boundries, so they have a minimum and maximum time/space taken. These bounds are upper bound and lower bound. +

+
    +
  • Upper Bound : The maximum amount of space/time taken by the algorithm is the upper bound. It is shown as a function of worst cases of time/storage usage over all the possible input sizes.
  • +
  • Lower Bound : The minimum amount of space/time taken by the algorithm is the lower bound. It is shown as a function of best cases of time/storage usage over all the possible input sizes.
  • +
+
+
+
+ +
+

1.4. Asymptotic Notations

+
+
+
+

1.4.1. Big-Oh Notation [O]

+
+
    +
  • The Big Oh notation is used to define the upper bound of an algorithm.
  • +
  • Given a non negative funtion f(n) and other non negative funtion g(n), we say that \(f(n) = O(g(n)\) if there exists a positive number \(n_0\) and a positive constant \(c\), such that \[ f(n) \le c.g(n) \ \ \forall n \ge n_0 \]
  • +
  • So if growth rate of g(n) is greater than or equal to growth rate of f(n), then \(f(n) = O(g(n))\).
  • +
+
+
+
+
+
+

2. Lecture 2

+
+
+
+

2.1. Asymptotic Notations

+
+
+
+

2.1.1. Omega Notation [ \(\Omega\) ]

+
+
    +
  • It is used to shown the lower bound of the algorithm.
  • +
  • For any positive integer \(n_0\) and a positive constant \(c\), we say that, \(f(n) = \Omega (g(n))\) if \[ f(n) \ge c.g(n) \ \ \forall n \ge n_0 \]
  • +
  • So growth rate of \(g(n)\) should be less than or equal to growth rate of \(f(n)\)
  • +
+ +

+Note : If \(f(n) = O(g(n))\) then \(g(n) = \Omega (f(n))\) +

+
+
+ +
+

2.1.2. Theta Notation [ \(\theta\) ]

+
+
    +
  • If is used to provide the asymptotic equal bound.
  • +
  • \(f(n) = \theta (g(n))\) if there exists a positive integer \(n_0\) and a positive constants \(c_1\) and \(c_2\) such that \[ c_1 . g(n) \le f(n) \le c_2 . g(n) \ \ \forall n \ge n_0 \]
  • +
  • So the growth rate of \(f(n)\) and \(g(n)\) should be equal.
  • +
+ +

+Note : So if \(f(n) = O(g(n))\) and \(f(n) = \Omega (g(n))\), then \(f(n) = \theta (g(n))\) +

+
+
+ +
+

2.1.3. Little-Oh Notation [o]

+
+
    +
  • The little o notation defines the strict upper bound of an algorithm.
  • +
  • We say that \(f(n) = o(g(n))\) if there exists positive integer \(n_0\) and positive constant \(c\) such that, \[ f(n) < c.g(n) \ \ \forall n \ge n_0 \]
  • +
  • Notice how condition is <, rather than \(\le\) which is used in Big-Oh. So growth rate of \(g(n)\) is strictly greater than that of \(f(n)\).
  • +
+
+
+ +
+

2.1.4. Little-Omega Notation [ \(\omega\) ]

+
+
    +
  • The little omega notation defines the strict lower bound of an algorithm.
  • +
  • We say that \(f(n) = \omega (g(n))\) if there exists positive integer \(n_0\) and positive constant \(c\) such that, \[ f(n) > c.g(n) \ \ \forall n \ge n_0 \]
  • +
  • Notice how condition is >, rather than \(\ge\) which is used in Big-Omega. So growth rate of \(g(n)\) is strictly less than that of \(f(n)\).
  • +
+
+
+
+ +
+

2.2. Comparing Growth rate of funtions

+
+
+
+

2.2.1. Applying limit

+
+

+To compare two funtions \(f(n)\) and \(g(n)\). We can use limit +\[ \lim_{n\to\infty} \frac{f(n)}{g(n)} \] +

+
    +
  • If result is 0 then growth of \(g(n)\) > growth of \(f(n)\)
  • +
  • If result is \(\infty\) then growth of \(g(n)\) < growth of \(f(n)\)
  • +
  • If result is any finite number (constant), then growth of \(g(n)\) = growth of \(f(n)\)
  • +
+

+Note : L'Hôpital's rule can be used in this limit. +

+
+
+ +
+

2.2.2. Using logarithm

+
+

+Using logarithm can be useful to compare exponential functions. When comaparing functions \(f(n)\) and \(g(n)\), +

+
    +
  • If growth of \(\log(f(n))\) is greater than growth of \(\log(g(n))\), then growth of \(f(n)\) is greater than growth of \(g(n)\)
  • +
  • If growth of \(\log(f(n))\) is less than growth of \(\log(g(n))\), then growth of \(f(n)\) is less than growth of \(g(n)\)
  • +
  • When using log for comparing growth, comaparing constants after applying log is also required. For example, if functions are \(2^n\) and \(3^n\), then their logs are \(n.log(2)\) and \(n.log(3)\). Since \(log(2) < log(3)\), the growth rate of \(3^n\) will be higher.
  • +
  • On equal growth after applying log, we can't decide which function grows faster.
  • +
+
+
+ +
+

2.2.3. Common funtions

+
+

+Commonly, growth rate in increasing order is +\[ c < c.log(log(n)) < c.log(n) < c.n < n.log(n) < c.n^2 < c.n^3 < c.n^4 ... \] +\[ n^c < c^n < n! < n^n \] +Where \(c\) is any constant. +

+
+
+
+ +
+

2.3. Properties of Asymptotic Notations

+
+
+
+

2.3.1. Big-Oh

+
+
    +
  • Product : \[ Given\ f_1 = O(g_1)\ \ and\ f_2 = O(g_2) \implies f_1 f_2 = O(g_1 g_2) \] \[ Also\ f.O(g) = O(f g) \]
  • + +
  • Sum : For a sum of two functions, the big-oh can be represented with only with funcion having higer growth rate. \[ O(f_1 + f_2 + ... + f_i) = O(max\ growth\ rate(f_1, f_2, .... , f_i )) \]
  • + +
  • Constants : For a constant \(c\) \[ O(c.g(n)) = O(g(n)) \], this is because the constants don't effect the growth rate.
  • +
+
+
+ +
+

2.3.2. Properties

+
+ +
+

asymptotic-notations-properties.png +

+
+ +
    +
  • Reflexive : \(f(n) = O(f(n)\) and \(f(n) = \Omega (f(n))\) and \(f(n) = \theta (f(n))\)
  • +
  • Symmetric : If \(f(n) = \theta (g(n))\) then \(g(n) = \theta (f(n))\)
  • +
  • Transitive : If \(f(n) = O(g(n))\) and \(g(n) = O(h(n))\) then \(f(n) = O(h(n))\)
  • +
  • Transpose : If \(f(n) = O(g(n))\) then we can also conclude that \(g(n) = \Omega (f(n))\) so we say Big-Oh is transpose of Big-Omega and vice-versa.
  • +
  • Antisymmetric : If \(f(n) = O(g(n))\) and \(g(n) = O(f(n))\) then we conclude that \(f(n) = g(n)\)
  • +
  • Asymmetric : If \(f(n) = \omega (g(n))\) then we can conclude that \(g(n) \ne \omega (f(n))\)
  • +
+
+
+
+
+
+

3. Lecture 3

+
+
+
+

3.1. Calculating time complexity of algorithm

+
+

+We will look at three types of situations +

+
    +
  • Sequential instructions
  • +
  • Iterative instructions
  • +
  • Recursive instructions
  • +
+
+ +
+

3.1.1. Sequential instructions

+
+

+A sequential set of instructions are instructions in a sequence without iterations and recursions. It is a simple block of instructions with no branches. A sequential set of instructions has time complexity of O(1), i.e., it has constant time complexity. +

+
+
+ +
+

3.1.2. Iterative instructions

+
+

+A set of instructions in a loop. Iterative instructions can have different complexities based on how many iterations occurs depending on input size. +

+ +
    +
  • For fixed number of iterations (number of iterations known at compile time i.e. independant of the input size), the time complexity is constant, O(1). Example for(int i = 0; i < 100; i++) { … } will always have 100 iterations, so constant time complexity.
  • +
  • For n number of iterations ( n is the input size ), the time complexity is O(n). Example, a loop for(int i = 0; i < n; i++){ … } will have n iterations where n is the input size, so complexity is O(n). Loop for(int i = 0; i < n/2; i++){…} also has time complexity O(n) because n/2 iterations are done by loop and 1/2 is constant thus not in big-oh notation.
  • +
  • For a loop like for(int i = 1; i <= n; i = i*2){…} the value of i is update as *=2, so the number of iterations will be \(log_2 (n)\). Therefore, the time complexity is \(O(log_2 (n))\).
  • +
  • For a loop like for(int i = n; i > 1; i = i/2){…} the value of i is update as *=2, so the number of iterations will be \(log_2 (n)\). Therefore, the time complexity is \(O(log_2 (n))\).
  • +
+ +

+Nested Loops +
+

+
    +
  • If inner loop iterator doesn't depend on outer loop, the complexity of the inner loop is multiplied by the number of times outer loop runs to get the time complexity For example, suppose we have loop as
  • +
+ +
+for(int i = 0; i < n; i++){
+  ...
+  for(int j = 0; j < n; j *= 2){
+    ...
+  }
+  ...
+}
+
+ +

+Here, the outer loop will n times and the inner loop will run log(n) times. Therefore, the total number of time statements in the inner loop run is n.log(n) times. +Thus the time complexity is O(n.log(n)). +

+ +
    +
  • If inner loop and outer loop are related, then complexities have to be computed using sums. Example, we have loop
  • +
+ +
+for(int i = 0; i <= n; i++){
+  ...
+  for(int j = 0; j <= i; j++){
+    ...
+  }
+  ...
+}
+
+ +

+Here the outer loop will run n times, so i goes from 0 to n. The number of times inner loop runs is j, which depends on i. +

+ + + + +++ ++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Value of iNumber of times inner loop runs
00
11
22
..
..
..
nn
+ +

+So the total number of times inner loop runs = \(1+2+3+....+n\) +
+total number of times inner loop runs = \(\frac{n.(n+1)}{2}\) +
+total number of times inner loop runs = \(\frac{n^2}{2} + \frac{n}{2}\) +
+Therefore, time complexity is \(O(\frac{n^2}{2} + \frac{n}{2}) = O(n^2)\) +
+Another example, +
+Suppose we have loop +

+
+for(int i = 1; i <= n; i++){
+  ...
+  for(int j = 1; j <= i; j *= 2){
+    ...
+  }
+  ...
+}
+
+ +

+The outer loop will run n times with i from 1 to n, and inner will run log(i) times. +

+ + + + +++ ++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Value of iNumber of times inner loop runs
1log(1)
2log(2)
3log(3)
..
..
..
nlog(n)
+ +

+Thus, total number of times the inner loop runs is \(log(1) + log(2) + log(3) + ... + log(n)\). +
+total number of times inner loop runs = \(log(1.2.3...n)\) +
+total number of times inner loop runs = \(log(n!)\) +
+Using Stirling's approximation, we know that \(log(n!) = n.log(n) - n + 1\) +
+total number of times inner loop runs = \(n.log(n) - n + 1\) +
+Time complexity = \(O(n.log(n))\) +

+
+
+ +
+

3.1.3. An example for time complexities of nested loops

+
+

+Suppose a loop, +

+
+for(int i = 1; i <= n; i *= 2){
+  ...
+  for(int j = 1; j <= i; j *= 2){
+    ...
+  }
+  ...
+}
+
+

+Here, outer loop will run log(n) times. Let's consider for some given n, it runs k times, i.e, let +\[ k = log(n) \] +

+ +

+The inner loop will run log(i) times, so number of loops with changing values of i is +

+ + + + +++ ++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Value of iNumber of times inner loop runs
1log(1)
21log(2)
222.log(2)
233.log(2)
..
..
..
2k-1(k-1).log(2)
+ +

+So the total number of times inner loop runs is \(log(1) + log(2) + 2.log(2) + 3.log(2) + ... + (k-1).log(2)\) +\[ \text{number of times inner loop runs} = log(1) + log(2).[1+2+3+...+(k-1)] \] +\[ \text{number of times inner loop runs} = log(1) + log(2). \frac{(k-1).k}{2} \] +\[ \text{number of times inner loop runs} = log(1) + log(2). \frac{k^2}{2} - \frac{k}{2} \] +Putting value \(k = log(n)\) +\[ \text{number of times inner loop runs} = log(1) + log(2). \frac{log^2(n)}{2} - \frac{log(n)}{2} \] +\[ \text{Time complexity} = O(log^2(n)) \] +

+
+
+
+
+
+

4. Lecture 4

+
+
+
+

4.1. Time complexity of recursive instructions

+
+

+To get time complexity of recursive functions/calls, we first also show time complexity as recursive manner. +

+
+ +
+

4.1.1. Time complexity in recursive form

+
+

+We first have to create a way to describe time complexity of recursive functions in form of an equation as, +\[ T(n) = ( \text{Recursive calls by the function} ) + ( \text{Time taken per call, i.e, the time taken except for recursive calls in the function} ) \] +

+ +
    +
  • Example, suppose we have a recursive function
  • +
+ +
+
int fact(int n){
+  if(n == 0 || n == 1)
+    return 1;
+  else
+    return n * fact(n-1);
+}
+
+
+ +

+in this example, the recursive call is fact(n-1), therefore the time complexity of recursive call is T(n-1) and the time complexity of function except for recursive call is constant (let's assume c). So the time complexity is +\[ T(n) = T(n-1) + c \] +\[ T(1) = T(0) = C\ \text{where C is constant time} \] +

+
    +
  • Another example,
  • +
+ +
+
int func(int n){
+  if(n == 0 || n == 1)
+    return 1;
+  else
+    return func(n - 1) * func(n - 2);
+}
+
+
+ +

+Here, the recursive calls are func(n-1) and func(n-2), therefore time complexities of recursive calls is T(n-1) and T(n-2). The time complexity of function except the recursive calls is constant (let's assume c), so the time complexity is +\[ T(n) = T(n-1) + T(n-2) + c \] +\[ T(1) = T(0) = C\ \text{where C is constant time} \] +

+ +
    +
  • Another example,
  • +
+ +
+
int func(int n){
+  int r = 0;
+  for(int i = 0; i < n; i++)
+    r += i;
+
+  if(n == 0 || n == 1)
+    return r;
+  else
+    return r * func(n - 1) * func(n - 2);
+}
+
+
+ +

+Here, the recursive calls are func(n-1) and func(n-2), therefore time complexities of recursive calls is T(n-1) and T(n-2). The time complexity of function except the recursive calls is θ (n) because of the for loop, so the time complexity is +

+ +

+\[ T(n) = T(n-1) + T(n-2) + n \] +\[ T(1) = T(0) = C\ \text{where C is constant time} \] +

+
+
+
+ + +
+

4.2. Solving Recursive time complexities

+
+
+
+

4.2.1. Iterative method

+
+
    +
  • Take for example,
  • +
+

+\[ T(1) = T(0) = C\ \text{where C is constant time} \] +\[ T(n) = T(n-1) + c \] +

+ +

+We can expand T(n-1). +\[ T(n) = [ T(n - 2) + c ] + c \] +\[ T(n) = T(n-2) + 2.c \] +Then we can expand T(n-2) +\[ T(n) = [ T(n - 3) + c ] + 2.c \] +\[ T(n) = T(n - 3) + 3.c \] +

+ +

+So, if we expand it k times, we will get +

+ +

+\[ T(n) = T(n - k) + k.c \] +Since we know this recursion ends at T(1), let's put \(n-k=1\). +Therefore, \(k = n-1\). +\[ T(n) = T(1) + (n-1).c \] +

+ +

+Since T(1) = C +\[ T(n) = C + (n-1).c \] +So time complexity is, +\[ T(n) = O(n) \] +

+ +
    +
  • Another example,
  • +
+

+\[ T(1) = C\ \text{where C is constant time} \] +\[ T(n) = T(n-1) + n \] +

+ +

+Expanding T(n-1), +\[ T(n) = [ T(n-2) + n - 1 ] + n \] +\[ T(n) = T(n-2) + 2.n - 1 \] +

+ +

+Expanding T(n-2), +\[ T(n) = [ T(n-3) + n - 2 ] + 2.n - 1 \] +\[ T(n) = T(n-3) + 3.n - 1 - 2 \] +

+ +

+Expanding T(n-3), +\[ T(n) = [ T(n-4) + n - 3 ] + 3.n - 1 - 2 \] +\[ T(n) = T(n-4) + 4.n - 1 - 2 - 3 \] +

+ +

+So expanding till T(n-k) +\[ T(n) = T(n-k) + k.n - [ 1 + 2 + 3 + .... + k ] \] +\[ T(n) = T(n-k) + k.n - \frac{k.(k+1)}{2} \] +

+ +

+Putting \(n-k=1\). Therefore, \(k=n-1\). +\[ T(n) = T(1) + (n-1).n - \frac{(n-1).(n)}{2} \] +\[ T(n) = C + n^2 - n - \frac{n^2}{2} + \frac{n}{2} \] +

+ +

+Time complexity is +\[ T(n) = O(n^2) \] +

+
+
+
+

4.2.2. Master Theorem for Subtract recurrences

+
+

+For recurrence relation of type +

+ +

+\[ T(n) = c\ for\ n \le 1 \] +\[ T(n) = a.T(n-b) + f(n)\ for\ n > 1 \] +\[ \text{where for f(n) we can say, } f(n) = O(n^k) \] +\[ \text{where, a > 0, b > 0 and k} \ge 0 \] +

+ +
    +
  • If a < 1, then T(n) = O(nk)
  • +
  • If a = 1, then T(n) = O(nk+1)
  • +
  • If a > 1, then T(n) = O(nk . an/b)
  • +
+ +

+Example, \[ T(n) = 3T(n-1) + n^2 \] +Here, f(n) = O(n2), therfore k = 2, +
+Also, a = 3 and b = 1 +
+Since a > 1, \(T(n) = O(n^2 . 3^n)\) +

+
+
+ +
+

4.2.3. Master Theorem for divide and conquer recurrences

+
+

+\[ T(n) = aT(n/b) + f(n).(log(n))^k \] +\[ \text{here, f(n) is a polynomial function} \] +\[ \text{and, a > 0, b > 0 and k } \ge 0 \] +We calculate a value \(n^{log_ba}\) +

+ +
    +
  • If \(\theta (f(n)) < \theta ( n^{log_ba} )\) then \(T(n) = \theta (n^{log_ba})\)
  • +
  • If \(\theta (f(n)) > \theta ( n^{log_ba} )\) then \(T(n) = \theta (f(n).(log(n))^k )\)
  • +
  • If \(\theta (f(n)) = \theta ( n^{log_ba} )\) then \(T(n) = \theta (f(n) . (log(n))^{k+1})\)
  • +
+

+For the above comparision, we say higher growth rate is greater than slower growth rate. Eg, θ (n2) > θ (n). +

+ +

+Example, calculating complexity for +\[ T(n) = T(n/2) + 1 \] +Here, f(n) = 1 +
+Also, a = 1, b = 2 and k = 0. +
+Calculating nlogba = nlog21 = n0 = 1 +
+Therfore, θ (f(n)) = θ (nlogba) +
+So time complexity is +\[ T(n) = \theta ( 1 . (log(n))^{0 + 1} ) \] +\[ T(n) = \theta (log(n)) \] +

+ +

+Another example, calculate complexity for +\[ T(n) = 2T(n/2) + nlog(n) \] +

+ +

+Here, f(n) = n +
+Also, a = 2, b = 2 and k = 1 +
+Calculating nlogba = nlog22 = n +
+Therefore, θ (f(n)) = θ (nlogba) +
+So time complexity is, +\[ T(n) = \theta ( n . (log(n))^{2}) \] +

+
+
+
+ + +
+

4.3. Square root recurrence relations

+
+
+
+

4.3.1. Iterative method

+
+

+Example, +\[ T(n) = T( \sqrt{n} ) + 1 \] +we can write this as, +\[ T(n) = T( n^{1/2}) + 1 \] +Now, we expand \(T( n^{1/2})\) +\[ T(n) = [ T(n^{1/4}) + 1 ] + 1 \] +\[ T(n) = T(n^{1/(2^2)}) + 1 + 1 \] +Expand, \(T(n^{1/4})\) +\[ T(n) = [ T(n^{1/8}) + 1 ] + 1 + 1 \] +\[ T(n) = T(n^{1/(2^3)}) + 1 + 1 + 1 \] +

+ +

+Expanding k times, +\[ T(n) = T(n^{1/(2^k)}) + 1 + 1 ... \text{k times } + 1 \] +\[ T(n) = T(n^{1/(2^k)}) + k \] +

+ +

+Let's consider \(T(2)=C\) where C is constant. +
+Putting \(n^{1/(2^k)} = 2\) +\[ \frac{1}{2^k} log(n) = log(2) \] +\[ \frac{1}{2^k} = \frac{log(2)}{log(n)} \] +\[ 2^k = \frac{log(n)}{log(2)} \] +\[ 2^k = log_2n \] +\[ k = log(log(n)) \] +

+ +

+So putting k in time complexity equation, +\[ T(n) = T(2) + log(log(n)) \] +\[ T(n) = C + log(log(n)) \] +Time complexity is, +\[ T(n) = \theta (log(log(n))) \] +

+
+
+ +
+

4.3.2. Master Theorem for square root recurrence relations

+
+

+For recurrence relations with square root, we need to first convert the recurrance relation to the form with which we use master theorem. Example, +\[ T(n) = T( \sqrt{n} ) + 1 \] +Here, we need to convert \(T( \sqrt{n} )\) , we can do that by substituting +\[ \text{Substitute } n = 2^m \] +\[ T(2^m) = T ( \sqrt{2^m} ) + 1 \] +\[ T(2^m) = T ( 2^{m/2} ) + 1 \] +

+ +

+Now, we need to consider a new function such that, +\[ \text{Let, } S(m) = T(2^m) \] +Thus our time recurrance relation will become, +\[ S(m) = S(m/2) + 1 \] +Now, we can apply the master's theorem. +
+Here, f(m) = 1 +
+Also, a = 1, and b = 2 and k = 0 +
+Calculating mlogba = mlog21 = m0 = 1 +
+Therefore, θ (f(m)) = θ ( mlogba ) +
+So by master's theorem, +\[ S(m) = \theta (1. (log(m))^{0+1} ) \] +\[ S(m) = \theta (log(m) ) \] +Now, putting back \(m = log(n)\) +\[ T(n) = \theta (log(log(n))) \] +Another example, +\[ T(n) = 2.T(\sqrt{n})+log(n) \] +Substituting \(n = 2^m\) +\[ T(2^m) = 2.T(\sqrt{2^m}) + log(2^m) \] +\[ T(2^m) = 2.T(2^{m/2}) + m \] +Consider a function \(S(m) = T(2^m)\) +\[ S(m) = 2.S(m/2) + m \] +Here, f(m) = m +
+Also, a = 2, b = 2 and k = 0 +
+Calculating mlogba = mlog22 = 1 +
+Therefore, θ (f(m)) > θ (mlogba) +
+Using master's theorem, +\[ S(m) = \theta (m.(log(m))^0 ) \] +\[ S(m) = \theta (m.1) \] +Putting value of m, +\[ T(n) = \theta (log(n)) \] +

+
+
+
+
+
+

5. Lecture 5

+
+
+
+

5.1. Extended Master's theorem for time complexity of recursive algorithms

+
+
+
+

5.1.1. For (k = -1)

+
+

+\[ T(n) = aT(n/b) + f(n).(log(n))^{-1} \] +\[ \text{Here, } f(n) \text{ is a polynomial function} \] +\[ a > 0\ and\ b > 1 \] +

+ +
    +
  • If θ (f(n)) < θ ( nlogba ) then, T(n) = θ (nlogba)
  • +
  • If θ (f(n)) > θ ( nlogba ) then, T(n) = θ (f(n))
  • +
  • If θ (f(n)) < θ ( nlogba ) then, T(n) = θ (f(n).log(log(n)))
  • +
+
+
+ +
+

5.1.2. For (k < -1)

+
+

+\[ T(n) = aT(n/b) + f(n).(log(n))^{k} \] +\[ \text{Here, } f(n) \text{ is a polynomial function} \] +\[ a > 0\ and\ b > 1\ and\ k < -1 \] +

+ +
    +
  • If θ (f(n)) < θ ( nlogba ) then, T(n) = θ (nlogba)
  • +
  • If θ (f(n)) > θ ( nlogba ) then, T(n) = θ (f(n))
  • +
  • If θ (f(n)) < θ ( nlogba ) then, T(n) = θ (nlogba)
  • +
+
+
+
+ +
+

5.2. Tree method for time complexity of recursive algorithms

+
+

+Tree method is used when there are multiple recursive calls in our recurrance relation. Example, +\[ T(n) = T(n/5) + T(4n/5) + f(n) \] +Here, one call is T(n/5) and another is T(4n/5). So we can't apply master's theorem. So we create a tree of recursive calls which is used to calculate time complexity. +The first node, i.e the root node is T(n) and the tree is formed by the child nodes being the calls made by the parent nodes. Example, let's consider the recurrance relation +\[ T(n) = T(n/5) + T(4n/5) + f(n) \] +

+ +
+      +-----T(n/5)
+T(n)--+
+      +-----T(4n/5)
+
+ +

+Since T(n) calls T(n/5) and T(4n/5), the graph for that is shown as drawn above. Now using recurrance relation, we can say that T(n/5) will call T(n/52) and T(4n/52). Also, T(4n/5) will call T(4n/52) and T(42 n/ 52). +

+ +
+		    +--T(n/5^2)
+      +-----T(n/5)--+
+      +             +--T(4n/5^2)
+T(n)--+
+      +             +--T(4n/5^2)
+      +-----T(4n/5)-+
+		    +--T(4^2 n/5^2)
+
+ +

+Suppose we draw this graph for an unknown number of levels. +

+ +
+		    +--T(n/5^2)- - - - - - -  etc.
+      +-----T(n/5)--+
+      +             +--T(4n/5^2) - - - - - - - - - etc.
+T(n)--+
+      +             +--T(4n/5^2) - - - - - -  - - - etc.
+      +-----T(4n/5)-+
+		    +--T(4^2 n/5^2)- - - - - - etc.
+
+ +

+We will now replace T()'s with the cost of the call. The cost of the call is f(n), i.e, the time taken other than that caused by the recursive calls. +

+ +
+		    +--f(n/5^2)- - - - - - -  etc.
+      +-----f(n/5)--+
+      +             +--f(4n/5^2) - - - - - - - - - etc.
+f(n)--+
+      +             +--f(4n/5^2) - - - - - -  - - - etc.
+      +-----f(4n/5)-+
+		    +--f(4^2 n/5^2)- - - - - - etc.
+
+ +

+In our example, let's assume f(n) = n, therfore, +

+ +
+		  +--  n/5^2 - - - - - - -  etc.
+    +-----  n/5 --+
+    +             +-- 4n/5^2  - - - - - - - - - etc.
+n --+
+    +             +--  4n/5^2  - - - - - -  - - -etc.
+    +-----  4n/5 -+
+		  +--  4^2 n/5^2 - - - - - -  etc.
+
+ +

+Now we can get cost of each level. +

+ +
+			   +--  n/5^2 - - - - - - -  etc.
+	     +-----  n/5 --+
+	     +             +-- 4n/5^2  - - - - - - - - - etc.
+	 n --+
+	     +             +--  4n/5^2  - - - - - -  - - -etc.
+	     +----- 4n/5 --+
+			   +--  4^2 n/5^2 - - - - - -  etc.
+
+
+Sum :    n         n/5         n/25                      
+		  +4n/5       +4n/25
+			      +4n/25
+			      +16n/25
+       .....      .....       ......
+	 n          n           n
+
+ +

+Since sum on all levels is n, we can say that Total time taken is +\[ T(n) = \Sigma \ (cost\ of\ level_i) \] +

+ +

+Now we need to find the longest branch in the tree. If we follow the pattern of expanding tree in a sequence as shown, then the longest branch is always on one of the extreme ends of the tree. So for our example, if tree has (k+1) levels, then our branch is either (n/5k) of (4k n/5k). Consider the terminating condition is, \(T(a) = C\). Then we will calculate value of k by equating the longest branch as, +\[ \frac{n}{5^k} = a \] +\[ k = log_5 (n/a) \] +Also, +\[ \frac{4^k n}{5^k} = a \] +\[ k = log_{5/4} n/a \] +

+ +

+So, we have two possible values of k, +\[ k = log_{5/4}(n/a),\ log_5 (n/a) \] +

+ +

+Now, we can say that, +\[ T(n) = \sum_{i=1}^{k+1} \ (cost\ of\ level_i) \] +Since in our example, cost of every level is n. +\[ T(n) = n.(k+1) \] +Putting values of k, +\[ T(n) = n.(log_{5/4}(n/a) + 1) \] +or +\[ T(n) = n.(log_{5}(n/a) + 1) \] +

+ +

+Of the two possible time complexities, we consider the one with higher growth rate in the big-oh notation. +

+
+ +
+

5.2.1. Avoiding tree method

+
+

+The tree method as mentioned is mainly used when we have multiple recursive calls with different factors. But when using the big-oh notation (O). We can avoid tree method in favour of the master's theorem by converting recursive call with smaller factor to larger. This works since big-oh calculates worst case. Let's take our previous example +\[ T(n) = T(n/5) + T(4n/5) + f(n) \] +Since T(n) is an increasing function. We can say that +\[ T(n/5) < T(4n/5) \] +So we can replace smaller one and approximate our equation to, +\[ T(n) = T(4n/5) + T(4n/5) + f(n) \] +\[ T(n) = 2.T(4n/5) + f(n) \] +

+ +

+Now, our recurrance relation is in a form where we can apply the mater's theorem. +

+
+
+
+ +
+

5.3. Space complexity

+
+

+The amount of memory used by the algorithm to execute and produce the result for a given input size is space complexity. Similar to time complexity, when comparing two algorithms space complexity is usually represented as the growth rate of memory used with respect to input size. The space complexity includes +

+
    +
  • Input space : The amount of memory used by the inputs to the algorithm.
  • +
  • Auxiliary space : The amount of memory used during the execution of the algorithm, excluding the input space.
  • +
+ +

+NOTE : Space complexity by definition includes both input space and auxiliary space, but when comparing algorithms the input space is often ignored. This is because two algorithms that solve the same problem will have same input space based on input size (Example, when comparing two sorting algorithms, the input space will be same because both get a list as an input). So from this point on, refering to space complexity, we are actually talking about Auxiliary Space Complexity, which is space complexity but only considering the auxiliary space. +

+
+ +
+

5.3.1. Auxiliary space complexity

+
+

+The space complexity when we disregard the input space is the auxiliary space complexity, so we basically treat algorithm as if it's input space is zero. Auxiliary space complexity is more useful when comparing algorithms because the algorithms which are working towards same result will have the same input space, Example, the sorting algorithms will all have the input space of the list, so it is not a metric we can use to compare algorithms. So from here, when we calculate space complexity, we are trying to calculate auxiliary space complexity and sometimes just refer to it as space complexity. +

+
+
+
+ +
+

5.4. Calculating auxiliary space complexity

+
+

+There are two parameters that affect space complexity, +

+
    +
  • Data space : The memory taken by the variables in the algorithm. So allocating new memory during runtime of the algorithm is what forms the data space. The space which was allocated for the input space is not considered a part of the data space.
  • +
  • Code Execution Space : The memory taken by the instructions themselves is called code execution space. Unless we have recursion, the code execution space remains constant since the instructions don't change during runtime of the algorithm. When using recursion, the instructions are loaded again and again in memory, thus increasing code execution space.
  • +
+
+ +
+

5.4.1. Data Space used

+
+

+The data space used by the algorithm depends on what data structures it uses to solve the problem. Example, +

+ +
+
/* Input size of n */
+void algorithms(int n){
+  /* Creating an array of whose size depends on input size */
+  int data[n];
+
+  for(int i = 0; i < n; i++){
+    int x = data[i];
+    // Work on data
+  }
+}
+
+
+ +

+Here, we create an array of size n, so the increase in allocated space increases with the input size. So the space complexity is, \(\theta (n)\). +
+

+
    +
  • Another example,
  • +
+ +
+
/* Input size of n */
+void algorithms(int n){
+  /* Creating a matrix sized n*n of whose size depends on input size */
+  int data[n][n];
+
+  for(int i = 0; i < n; i++){
+    for(int j = 0; j < n; j++){
+      int x = data[i][j];
+      // Work on data
+    }
+  }
+}
+
+
+ +

+Here, we create a matrix of size n*n, so the increase in allocated space increases with the input size by \(n^2\). So the space complexity is, \(\theta (n^2)\). +

+ +
    +
  • If we use a node based data structure like linked list or trees, then we can show space complexity as the number of nodes used by algorithm based on input size, (if all nodes are of equal size).
  • +
  • Space complexity of the hash map is considered O(n) where n is the number of entries in the hash map.
  • +
+
+
+ +
+

5.4.2. Code Execution space in recursive algorithm

+
+

+When we use recursion, the function calls are stored in the stack. This means that code execution space will increase. A single function call has fixed (constant) space it takes in the memory. So to get space complexity, we need to know how many function calls occur in the longest branch of the function call tree. +

+ +
    +
  • NOTE : Space complexity only depends on the longest branch of the function calls tree.
  • +
  • The tree is made the same way we make it in the tree method for calculating time complexity of recursive algorithms
  • +
+ +

+This is because at any given time, the stack will store only a single branch. +

+ +
    +
  • Example,
  • +
+ +
+
int func(int n){
+  if(n == 1 || n == 0)
+    return 1;
+  else
+    return n * func(n - 1);
+}
+
+
+ +

+To calculate space complexity we can use the tree method. But rather than when calculating time complexity, we will count the number of function calls using the tree. +We will do this by drawing tree of what function calls will look like for given input size n. +
+The tree for k+1 levels is, +

+ +
+func(n)--func(n-1)--func(n-2)--.....--func(n-k)
+
+ +

+This tree only has a single branch. To get the number of levels for a branch, we put the terminating condition at the extreme branches of the tree. Here, the terminating condition is func(1), therefore, we will put \(func(1) = func(n-k)\), i.e, +\[ 1 = n - k \] +\[ k + 1 = n \] +

+ +

+So the number of levels is \(n\). Therefore, space complexity is \(\theta (n)\) +

+ +
    +
  • Another example,
  • +
+ +
+
void func(int n){
+  if(n/2 <= 1)
+    return n;
+  func(n/2);
+  func(n/2);
+}
+
+
+ +

+Drawing the tree for k+1 levels. +

+
+			  +--func(n/2^2)- - - - - - -  func(n/2^k)
+	 +-----func(n/2)--+
+	 +                +--func(n/2^2) - - - - - - - - - func(n/2^k)
+func(n)--+
+	 +               +--func(n/2^2) - - - - - -  - - - func(n/2^k)
+	 +-----func(n/2)-+
+			 +--func(n/2^2)- - - - - - func(n/2^k)
+
+ +
    +
  • As we know from the tree method, the two extreme branches of the tree will always be the longest ones.
  • +
+ +

+Both the extreme branches have the same call which here is func(n/2k). To get the number of levels for a branch, we put the terminating condition at the extreme branches of the tree. Here, the terminating condition is func(2), therefore, we will put \(func(2) = func(n/2^k)\), i.e, +\[ 2 = \frac{n}{2^k} \] +\[ k + 1 = log_2n \] +Number of levels is \(log_2n\). Therefore, space complexity is \(\theta (log_2n)\). +

+
+
+
+
+
+

6. Lecture 6

+
+
+
+

6.1. Divide and Conquer algorithms

+
+

+Divide and conquer is a problem solving strategy. In divide and conquer algorithms, we solve problem recursively applying three steps : +

+
    +
  • Divide : Problem is divided into smaller problems that are instances of same problem.
  • +
  • Conquer : If subproblems are large, divide and solve them recursivly. If subproblem is small enough then solve it in a straightforward method
  • +
  • Combine : combine the solutions of subproblems into the solution for the original problem.
  • +
+ +

+Example, +

+
    +
  1. Binary search
  2. +
  3. Quick sort
  4. +
  5. Merge sort
  6. +
  7. Strassen's matrix multiplication
  8. +
+
+
+ +
+

6.2. Searching for element in array

+
+
+
+

6.2.1. Straight forward approach for searching (Linear Search)

+
+
+
int linear_search(int *array, int n, int x){
+  for(int i = 0; i < n; i++){
+    if(array[i] == x){
+      printf("Found at index : %d", i);
+      return i;
+    }
+  }
+  return -1;
+}
+
+
+ +

+Recursive approach +

+ +
+
# call this function with index = 0
+def linear_search(array, item, index):
+    if len(array) < 1:
+        return -1
+    elif array[index] == item:
+        return index
+    else:
+        return linear_search(array, item, index + 1)
+
+
+ +

+Recursive time complexity : \(T(n) = T(n-1) + 1\) +

+ +
    +
  • Best Case : The element to search is the first element of the array. So we need to do a single comparision. Therefore, time complexity will be constant O(1).
  • +
+

+
+

+
    +
  • Worst Case : The element to search is the last element of the array. So we need to do n comparisions for the array of size n. Therefore, time complexity is O(n).
  • +
+

+
+

+
    +
  • Average Case : For calculating the average case, we need to consider the average number of comparisions done over all possible cases.
  • +
+ + + + +++ ++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Position of element to search (x)Number of comparisions done
01
12
23
..
..
..
n-1n
………………..………………..
Sum\(\frac{n(n+1)}{2}\)
+ +

+\[ \text{Average number of comparisions} = \frac{ \text{Sum of number of comparisions of all cases} }{ \text{Total number of cases.} } \] +\[ \text{Average number of comparisions} = \frac{n(n+1)}{2} \div n \] +\[ \text{Average number of comparisions} = \frac{n+1}{2} \] +\[ \text{Time complexity in average case} = O(n) \] +

+
+
+ +
+

6.2.2. Divide and conquer approach (Binary search)

+
+

+The binary search algorithm works on an array which is sorted. In this algorithm we: +

+
    +
  1. Check the middle element of the array, return the index if element found.
  2. +
  3. If element > array[mid], then our element is in the right part of the array, else it is in the left part of the array.
  4. +
  5. Get the mid element of the left/right sub-array
  6. +
  7. Repeat this process of division to subarray's and comparing the middle element till our required element is found.
  8. +
+ +

+The divide and conquer algorithm works as, +
+Suppose binarySearch(array, left, right, key), left and right are indicies of left and right of subarray. key is the element we have to search. +

+
    +
  • Divide part : calculate mid index as mid = left + (right - left) /2 or (left + right) / 2. If array[mid] == key, return the value of mid.
  • +
  • Conquer part : if array[mid] > key, then key must not be in right half. So we search for key in left half, so we will recursively call binarySearch(array, left, mid - 1, key). Similarly, if array[mid] < key, then key must not be in left half. So we search for key in right half, so recursively call binarySearch(array, mid + 1, right, key).
  • +
  • Combine part : Since the binarySearch function will either return -1 or the index of the key, there is no need to combine the solutions of the subproblems.
  • +
+ + +
+

binary-search.jpg +

+
+ +
+
int binary_search(int *array, int n, int x){
+  int low = 0;
+  int high = n;
+
+  int mid = (low + high) / 2;
+
+  while(low <= high){
+    mid = (low + high) / 2;
+    if (x == array[mid]){
+      return mid;
+    }else if (x < array[mid]){
+      low = low;
+      high = mid - 1;
+    }else{
+      low = mid + 1;
+      high = high;
+    }
+  }
+
+  return -1;
+}
+
+
+ +

+Recursive approach: +

+ +
+
int binary_search(int *array, int left, int right, int x){
+  if(left > right)
+    return -1;
+
+  int mid = (left + right) / 2;
+  // or we can use mid = left + (right - left) / 2, this will avoid int overflow when array has more elements.
+
+  if (x == array[mid])
+    return mid;
+  else if (x < array[mid])
+    return binary_search(array, left, mid - 1, x);
+  else
+    return binary_search(array, mid + 1, right, x);
+}
+
+
+ +

+Recursive time complexity : \(T(n) = T(n/2) + 1\) +

+ +
    +
  • Best Case : Time complexity = O(1)
  • +
  • Average Case : Time complexity = O(log n)
  • +
  • Worst Case : Time complexity = O(log n)
  • +
+ +

+Binary search is better for sorted arrays and linear search is better for sorted arrays. +
+Another way to visualize binary search is using the binary tree. +

+
+
+
+ +
+

6.3. Max and Min element from array

+
+
+
+

6.3.1. Straightforward approach

+
+
+
def min_max(a):
+    max = min = a[1]
+    for i in range(2, n):
+        if a[i] > max:
+            max = a[i];
+        elif a[i] < min:
+            min = a[i];
+
+    return (min,max)
+
+
+ +
    +
  • Best case : array is sorted in ascending order. Number of comparisions is \(n-1\). Time complexity is \(O(n)\).
  • +
  • Worst case : array is sorted in descending order. Number of comparisions is \(2.(n-1)\). Time complexity is \(O(n)\).
  • +
  • Average case : array can we arranged in n! ways, this makes calculating number of comparisions in the average case hard and it is somewhat unnecessary, so it is skiped. Time complexity is \(O(n)\)
  • +
+
+
+ +
+

6.3.2. Divide and conquer approach

+
+

+Suppose the function is MinMax(array, left, right) which will return a tuple (min, max). We will divide the array in the middle, mid = (left + right) / 2. The left array will be array[left:mid] and right aray will be array[mid+1:right] +

+
    +
  • Divide part : Divide the array into left array and right array. If array has only single element then both min and max are that single element, if array has two elements then compare the two and the bigger element is max and other is min.
  • +
  • Conquer part : Recursively get the min and max of left and right array, leftMinMax = MinMax(array, left, mid) and rightMinMax = MinMax(array, mid + 1, right).
  • +
  • Combine part : If leftMinMax[0] > rightMinmax[0], then min = righMinMax[0], else min = leftMinMax[0]. Similarly, if leftMinMax[1] > rightMinMax[1], then max = leftMinMax[1], else max = rightMinMax[1].
  • +
+ +
+
# Will return (min, max)
+def minmax(array, left, right):
+    if left == right:       # Single element in array
+        return (array[left], array[left])
+    elif left + 1 == right: # Two elements in array
+        if array[left] > array[right]:
+            return (array[right], array[left])
+        else:
+            return (array[left], array[right])
+    else:                  # More than two elements
+        mid = (left + right) / 2
+        minimum, maximum = 0, 0
+        leftMinMax = minmax(array, left, mid)
+        rightMinMax = minmax(array, mid + 1, right)
+
+        # Combining result of the minimum from left and right subarray's
+        if leftMinMax[0] > rightMinMax[0]:
+            minimum = rightMinMax[0]
+        else:
+            minimum = leftMinMax[0]
+
+        # Combining result of the maximum from left and right subarray's
+        if leftMinMax[1] > rightMinMax[1]:
+            maximum = leftMinMax[1]
+        else:
+            maximum = rightMinMax[1]
+
+        return (minimum, maximum)
+
+
+
+ +
    +
  • Time complexity
  • +
+

+We divide the problem into two parts of approximately equal size, and combining their results takes two comparisons. Taking one comparison as unit time, the time complexity is +\[ T(n) = T(n/2) + T(n/2) + 2 \] +\[ T(n) = 2\,T(n/2) + 2 \] +The recurrence terminates with a single element in the array and zero comparisons, i.e. \(T(1) = 0\), or with two elements and a single comparison, \(T(2) = 1\). +
+Now we can use the master theorem or the recursion tree method to solve for the time complexity. +\[ T(n) = \theta (n) \] +

+ +
    +
  • Space complexity
  • +
+

+For space complexity, we need to find the longest branch of the recursion tree. Since both recursive calls are of the same size and the reduction factor is (1/2), for k+1 levels the function call will be func(n/2^k), and the terminating condition is func(2): +\[ func(2) = func(n/2^k) \] +\[ 2 = \frac{n}{2^k} \] +\[ k + 1 = \log_2 n \] +Since the longest branch has \(\log_2 n\) nodes, the space complexity is \(O(\log_2 n)\). +

+ +
    +
  • Number of comparisons
  • +
+

+In every case, i.e. best, average and worst, the number of comparisons in this algorithm is the same. +\[ \text{Total number of comparisons} = \frac{3n}{2} - 2 \] +If n is not a power of 2, we round the number of comparisons up. +
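+To see where \(\frac{3n}{2} - 2\) comes from, expand the recurrence (assuming n is a power of 2) down to the base case \(T(2) = 1\): +\[ T(n) = 2T(n/2) + 2 = 4T(n/4) + 4 + 2 = \dots = \frac{n}{2}\,T(2) + (n - 2) \] +\[ T(n) = \frac{n}{2} + n - 2 = \frac{3n}{2} - 2 \] +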

+
+
+ +
+

6.3.3. Efficient single loop approach (Increment by 2)

+
+

+In this algorithm we compare pairs of numbers from the array. It works on the idea that only the larger number of the pair can be the new maximum and only the smaller one can be the new minimum. So after comparing the pair, we test the bigger of the two against the current maximum and the smaller against the current minimum. This brings the number of comparisons needed to process two array elements from 4 (when we increment by 1) down to 3 (when we increment by 2). +

+ +
+
def min_max(array):
+    (minimum, maximum) = (array[0], array[0])
+    i = 1
+    while i < len(array):
+        if i + 1 == len(array):  # don't check i+1, it's out of bounds, break the loop after checking a[i]
+            if array[i] > maximum:
+                maximum = array[i]
+            elif array[i] < minimum:
+                minimum = array[i]
+            break
+
+        if array[i] > array[i + 1]:
+            # check possibility that array[i] is maximum and array[i+1] is minimum
+            if array[i] > maximum:
+                maximum = array[i]
+            if array[i + 1] < minimum:
+                minimum = array[i + 1]
+        else:
+            # check possibility that array[i+1] is maximum and array[i] is minimum
+            if array[i + 1] > maximum:
+                maximum = array[i + 1]
+            if array[i] < minimum:
+                minimum = array[i]
+
+        i += 2
+    return (minimum, maximum)
+
+
+ +
    +
  • Time complexity = O(n)
  • +
  • Space complexity = O(1)
  • +
  • Total number of comparisons = +\[ \text{If n is odd}, \frac{3(n-1)}{2} \] +\[ \text{If n is even}, \frac{3n}{2} - 2 \]
  • +
+
+
+
+
+
+

7. Lecture 7

+
+
+
+

7.1. Square matrix multiplication

+
+

+Matrix multiplication algorithms taken from here: +https://www.cs.mcgill.ca/~pnguyen/251F09/matrix-mult.pdf +

+
+ +
+

7.1.1. Straightforward method

+
+
+
/* This will calculate A X B and store it in C. */
+#define N 3
+
+int main(){
+  int A[N][N] = {
+    {1,2,3},
+    {4,5,6},
+    {7,8,9} };
+
+  int B[N][N] = {
+    {10,20,30},
+    {40,50,60},
+    {70,80,90} };
+
+  int C[N][N];
+
+  for(int i = 0; i < N; i++){
+    for(int j = 0; j < N; j++){
+      C[i][j] = 0;
+      for(int k = 0; k < N; k++){
+        C[i][j] += A[i][k] * B[k][j];
+      }
+    }
+  }
+
+  return 0;
+}
+
+
+ +

+Time complexity is \(O(n^3)\) +

+
+
+ +
+

7.1.2. Divide and conquer approach

+
+

+The divide and conquer algorithm only works for a square matrix whose size is n X n, where n is a power of 2. The algorithm works as follows. +

+ +
+MatrixMul(A, B, n):
+  If n == 2 {
+    return A X B
+  }else{
+    Break A into four parts A_11, A_12, A_21, A_22, where A = [[ A_11, A_12],
+							       [ A_21, A_22]]
+
+    Break B into four parts B_11, B_12, B_21, B_22, where B = [[ B_11, B_12],
+							       [ B_21, B_22]]
+
+    C_11 = MatrixMul(A_11, B_11, n/2) + MatrixMul(A_12, B_21, n/2)
+    C_12 = MatrixMul(A_11, B_12, n/2) + MatrixMul(A_12, B_22, n/2)
+    C_21 = MatrixMul(A_21, B_11, n/2) + MatrixMul(A_22, B_21, n/2)
+    C_22 = MatrixMul(A_21, B_12, n/2) + MatrixMul(A_22, B_22, n/2)
+
+    C = [[ C_11, C_12],
+	 [ C_21, C_22]]
+
+    return C
+  }
+
+ +

+The addition of matrices of size (n X n) takes \(\theta (n^2)\) time, therefore the additions needed to compute C11 take \(\theta \left( \left( \frac{n}{2} \right)^2 \right)\) time, which equals \(\theta \left( \frac{n^2}{4} \right)\). Therefore, the combined addition time for C11, C12, C21 and C22 is \(\theta \left( 4 \frac{n^2}{4} \right)\), which equals \(\theta (n^2)\). +
+There are 8 recursive calls in this function, each of the form MatrixMul(n/2), therefore the time complexity will be +\[ T(n) = 8T(n/2) + \theta (n^2) \] +Using the master theorem +\[ T(n) = \theta (n^{\log_2 8}) \] +\[ T(n) = \theta (n^3) \] +
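+The pseudocode above translates fairly directly into Python. The sketch below is illustrative rather than taken from the lecture: it assumes NumPy (not otherwise used in these notes) for convenient block slicing, and assumes A and B are n X n arrays with n a power of 2, n >= 2. +
+import numpy as np   # used here only for block slicing and np.block; an assumption, not part of the lecture code
+
+def matrix_mul(A, B):
+    """Divide and conquer multiplication of two n x n matrices (n a power of 2)."""
+    n = A.shape[0]
+    if n == 2:                       # base case: multiply the 2 x 2 blocks directly
+        return A @ B
+    h = n // 2                       # split each matrix into four h x h blocks
+    A11, A12, A21, A22 = A[:h, :h], A[:h, h:], A[h:, :h], A[h:, h:]
+    B11, B12, B21, B22 = B[:h, :h], B[:h, h:], B[h:, :h], B[h:, h:]
+
+    # 8 recursive multiplications combined with 4 matrix additions
+    C11 = matrix_mul(A11, B11) + matrix_mul(A12, B21)
+    C12 = matrix_mul(A11, B12) + matrix_mul(A12, B22)
+    C21 = matrix_mul(A21, B11) + matrix_mul(A22, B21)
+    C22 = matrix_mul(A21, B12) + matrix_mul(A22, B22)
+    return np.block([[C11, C12], [C21, C22]])
+
+For example, with A = np.arange(16).reshape(4, 4), matrix_mul(A, A) matches A @ A. +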

+
+
+ +
+

7.1.3. Strassen's algorithm

+
+

+Another, more efficient divide and conquer algorithm for matrix multiplication. This algorithm also works only on square matrices with n a power of 2. It is based on the observation that, for A X B = C, we can calculate C11, C12, C21 and C22 as +

+ +

+\[ C_{11} = P_5 + P_4 - P_2 + P_6 \] +\[ C_{12} = P_1 + P_2 \] +\[ C_{21} = P_3 + P_4 \] +\[ C_{22} = P_1 + P_5 - P_3 - P_7 \] +Where, +\[ P_1 = A_{11} \times (B_{12} - B_{22}) \] +\[ P_2 = (A_{11} + A_{12}) \times B_{22} \] +\[ P_3 = (A_{21} + A_{22}) \times B_{11} \] +\[ P_4 = A_{22} \times (B_{21} - B_{11}) \] +\[ P_5 = (A_{11} + A_{22}) \times (B_{11} + B_{22}) \] +\[ P_6 = (A_{12} - A_{22}) \times (B_{21} + B_{22}) \] +\[ P_7 = (A_{11} - A_{21}) \times (B_{11} + B_{12}) \] +This reduces the number of recursive calls from 8 to 7. +

+ +
+Strassen(A, B, n):
+  If n == 2 {
+    return A X B
+  }
+  Else{
+    Break A into four parts A_11, A_12, A_21, A_22, where A = [[ A_11, A_12],
+							       [ A_21, A_22]]
+
+    Break B into four parts B_11, B_12, B_21, B_22, where B = [[ B_11, B_12],
+							       [ B_21, B_22]]
+    P_1 = Strassen(A_11, B_12 - B_22, n/2)
+    P_2 = Strassen(A_11 + A_12, B_22, n/2)
+    P_3 = Strassen(A_21 + A_22, B_11, n/2)
+    P_4 = Strassen(A_22, B_21 - B_11, n/2)
+    P_5 = Strassen(A_11 + A_22, B_11 + B_22, n/2)
+    P_6 = Strassen(A_12 - A_22, B_21 + B_22, n/2)
+    P_7 = Strassen(A_11 - A_21, B_11 + B_12, n/2)
+    C_11 = P_5 + P_4 - P_2 + P_6
+    C_12 = P_1 + P_2
+    C_21 = P_3 + P_4
+    C_22 = P_1 + P_5 - P_3 - P_7
+    C = [[ C_11, C_12],
+	 [ C_21, C_22]]
+    return C
+  }
+
+ +

+This algorithm uses 18 matrix addition operations, so the computation time for the additions is \(\theta \left(18\left( \frac{n}{2} \right)^2 \right)\), which is equal to \(\theta (4.5 n^2)\), which is \(\theta (n^2)\). +
+There are 7 recursive calls in this function, each of the form Strassen(n/2), therefore the time complexity is +\[ T(n) = 7T(n/2) + \theta (n^2) \] +Using the master theorem +\[ T(n) = \theta (n^{\log_2 7}) \] +\[ T(n) = \theta (n^{2.807}) \] +
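+A matching sketch of Strassen's recursion (again illustrative, assuming NumPy and n a power of 2, n >= 2); compared to the previous sketch only the seven products P1 to P7 and the combination step change. +
+import numpy as np   # an assumption for block slicing, not part of the lecture code
+
+def strassen(A, B):
+    """Strassen multiplication of two n x n matrices (n a power of 2)."""
+    n = A.shape[0]
+    if n == 2:                       # base case (could equally be n == 1, as noted below)
+        return A @ B
+    h = n // 2
+    A11, A12, A21, A22 = A[:h, :h], A[:h, h:], A[h:, :h], A[h:, h:]
+    B11, B12, B21, B22 = B[:h, :h], B[:h, h:], B[h:, :h], B[h:, h:]
+
+    # 7 recursive multiplications instead of 8
+    P1 = strassen(A11, B12 - B22)
+    P2 = strassen(A11 + A12, B22)
+    P3 = strassen(A21 + A22, B11)
+    P4 = strassen(A22, B21 - B11)
+    P5 = strassen(A11 + A22, B11 + B22)
+    P6 = strassen(A12 - A22, B21 + B22)
+    P7 = strassen(A11 - A21, B11 + B12)
+
+    C11 = P5 + P4 - P2 + P6
+    C12 = P1 + P2
+    C21 = P3 + P4
+    C22 = P1 + P5 - P3 - P7
+    return np.block([[C11, C12], [C21, C22]])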

+ + +
    +
  • NOTE : The divide and conquer approach and Strassen's algorithm typically use n == 1 as their terminating condition, since multiplying 1 X 1 matrices only requires the product of the single elements they contain; that product is the single element of the resulting 1 X 1 matrix.
  • +
+
+
+
+ +
+

7.2. Sorting algorithms

+
+
+
+

7.2.1. In-place vs out-of-place sorting algorithms

+
+

+If the space complexity of a sorting algorithm is \(\theta (1)\), then it is called an in-place sorting algorithm; otherwise it is called an out-of-place sorting algorithm. +

+
+
+ +
+

7.2.2. Bubble sort

+
+

+It is the simplest sorting algorithm and easy to implement, so it is useful when the number of elements to sort is small. It is an in-place sorting algorithm. We compare pairs of adjacent elements from the array and swap them into the correct order. Suppose the input has n elements. +

+
    +
  • For the first pass of the array, we will do n-1 comparisons between pairs: 1st and 2nd element; then 2nd and 3rd element; then 3rd and 4th element; up to the comparison between the (n-1)th and nth element, swapping positions according to size. A single pass puts a single element at the end of the list in its correct position.
  • +
  • For the second pass of the array, we will do n-2 comparisons because the last element is already in its place after the first pass.
  • +
  • Similarly, we will continue till we only do a single comparison.
  • +
  • The total number of comparisons will be +\[ \text{Total comparisons} = (n - 1) + (n - 2) + (n - 3) + \dots + 2 + 1 \] +\[ \text{Total comparisons} = \frac{n(n-1)}{2} \] +Therefore, time complexity is \(\theta (n^2)\)
  • +
+ +
+
void bubble_sort(int array[], int n){
+  /* i is the number of comparisons in the pass */
+  for(int i = n - 1; i >= 1; i--){
+    /* j is used to traverse the list */
+    for(int j = 0; j < i; j++){
+      if(array[j] > array[j+1]){
+        int temp = array[j];        /* swap adjacent elements */
+        array[j] = array[j+1];
+        array[j+1] = temp;
+      }
+    }
+  }
+}
+
+
+ +

+Minimum number of swaps can be calculated by checking how many swap operations are needed to get each element in its correct position. This can be done by checking the number of smaller elements towards the left. For descending, check the number of larger elements towards the left of the given element. Example for ascending sort, +

+| Array                                               | 21 | 16 | 17 |  8 | 31 |
+| Minimum number of swaps to get in correct position  |  3 |  1 |  0 |  0 |  0 |
+

+Therefore, the minimum number of swaps is (3 + 1 + 0 + 0 + 0), which is equal to 4 swaps. +

+ +
    +
  • Reducing the number of comparisons in the implementation : at the end of every pass, check the number of swaps. If the number of swaps in a pass is zero, then the array is sorted. This does not give the minimum number of comparisons, but it reduces the count compared to the default implementation. It reduces the time complexity to \(\theta (n)\) in the best case, since we only need to pass through the array once (a sketch of this follows the list below).
  • +
+
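+A minimal Python sketch of this swap-count optimisation (the function name is illustrative): +
+def bubble_sort(array):
+    """Bubble sort with early exit: stop when a full pass makes no swaps."""
+    n = len(array)
+    for i in range(n - 1, 0, -1):     # i = number of comparisons in this pass
+        swapped = False
+        for j in range(i):
+            if array[j] > array[j + 1]:
+                array[j], array[j + 1] = array[j + 1], array[j]
+                swapped = True
+        if not swapped:               # no swaps means the array is already sorted
+            break
+    return array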

+Recursive time complexity : \(T(n) = T(n-1) + n - 1\) +

+
+
+
+
+
+

8. Lecture 8

+
+
+
+

8.1. Selection sort

+
+

+It is an in-place sorting technique. In this algorithm, we get the minimum element from the array and swap it into the first position. Then we get the minimum from array[1:] and place it at index 1. Similarly, we get the minimum from array[2:] and place it at index 2. We continue until we get the minimum from array[len(array) - 2:] and place it at index len(array) - 2. +

+ +
+
void selection_sort(int array[], int n){
+  for( int i = 0; i < n - 1; i++ ) {
+    /* Get the index of the minimum element in the sub-array [i:] */
+    int min_index = i;
+    for( int j = i + 1; j < n; j++ )
+      if (array[j] < array[min_index]) { min_index = j; }
+
+    /* Swap the minimum element into position i, the start of the sub-array */
+    int temp = array[i];
+    array[i] = array[min_index];
+    array[min_index] = temp;
+  }
+}
+
+
+ +

+The total number of comparisons is +\[ \text{Total number of comparisons} = (n-1) + (n-2) + (n-3) + \dots + 1 \] +\[ \text{Total number of comparisons} = \frac{n(n-1)}{2} \] +For this algorithm, the number of comparisons is the same in the best, average and worst cases. +Therefore the time complexity in all cases is \[ \text{Time complexity} = \theta (n^2) \] +

+ +
    +
  • Recurrence time complexity : \(T(n) = T(n-1) + n - 1\)
  • +
+
+
+ +
+

8.2. Insertion sort

+
+

+It is an in-place sorting algorithm. +

+
    +
  • In this algorithm, we first divide the array into two sections. Initially, the left section has a single element and the right section has all the other elements. Therefore, the left part is sorted and the right part is unsorted.
  • +
  • We call the leftmost element of the right section the key.
  • +
  • Now, we insert the key at its correct position in the left section.
  • +
  • As is commonly known, an insertion operation requires shifting elements, so we shift elements in the left section.
  • +
+ +
+
void insertion_sort ( int array[], int n ) {
+  for( int i = 1; i < n; i++ ) {
+    /* Key is the first element of the right (unsorted) section of the array */
+    int key = array[i];
+    int j = i - 1;
+
+    /* Shift till we find the correct position of the key in the left section */
+    while ( j >= 0 && array[j] > key ) {
+      array[j + 1] = array[j];
+      j -= 1;
+    }
+    /* Insert the key at its correct position */
+    array[j+1] = key;
+  }
+}
+
+
+ +
    +
  • Time complexity
  • +
+ +

+Best Case : The best case is when the input array is already sorted. In this case, we do (n-1) comparisons and no shifts. The time complexity will be \(\theta (n)\) +
+Worst Case : The worst case is when the input array is in descending order when we need to sort in ascending order, and vice versa (basically the reverse of sorted). The number of comparisons is +
+\[ [1 + 2 + 3 + .. + (n-1)] = \frac{n(n-1)}{2} \] +
+The number of element shift operations is +
+\[ [1 + 2 + 3 + .. + (n-1)] = \frac{n(n-1)}{2} \] +
+Total time complexity becomes \(\theta \left( 2 \frac{n(n-1)}{2} \right)\), which is simplified to \(\theta (n^2)\). +

+ +
    +
  • NOTE : Rather than using linear search to find the position of the key in the left (sorted) section, we can use binary search to reduce the number of comparisons (a sketch follows this list).
  • +
+
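+A minimal Python sketch of this binary insertion sort idea (it uses the standard library's bisect module; the names are illustrative). Note that only the comparisons drop to O(n log n); the element shifts are still O(n^2) in the worst case. +
+from bisect import bisect_right
+
+def binary_insertion_sort(array):
+    """Insertion sort that finds the key's position with binary search."""
+    for i in range(1, len(array)):
+        key = array[i]
+        # binary search for the insertion point in the sorted prefix array[:i]
+        pos = bisect_right(array, key, 0, i)
+        # shift the larger elements one step to the right and insert the key
+        array[pos + 1:i + 1] = array[pos:i]
+        array[pos] = key
+    return array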
+
+ +
+

8.3. Inversion in array

+
+

+The inversion count of an array is a measure of how far the array is from being sorted. +
+For an ascending sort, it is the number of element pairs such that array[i] > array[j] and i < j, or in other words array[i] < array[j] and i > j. +

+
    +
  • For ascending sort, we can simply count, for each element, the number of elements to its right that are smaller.
  • +
+| Array      | 10 | 6 | 12 | 8 | 3 | 1 |
+| Inversions |  4 | 2 |  3 | 2 | 1 | 0 |
+ +

+Total number of inversions = (4+2+3+2+1+0) = 12 +

+ +
    +
  • For descending sort, we can simply count, for each element, the number of elements to its right that are larger.
  • +
+| Array      | 10 | 6 | 12 | 8 | 3 | 1 |
+| Inversions |  1 | 2 |  0 | 0 | 0 | 0 |
+ +

+Total number of inversions = (1 + 2 + 0 + 0 + 0 + 0) = 3 +

+ +
    +
  • For an array of size n
  • +
+

+\[ \text{Maximum possible number of inversions} = \frac{n(n-1)}{2} \] +\[ \text{Minimum possible number of inversions} = 0 \] +
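+A direct O(n^2) count of inversions following the definition above (a small illustrative sketch; for the ascending example it prints 12): +
+def count_inversions(array):
+    """Count pairs (i, j) with i < j and array[i] > array[j]."""
+    count = 0
+    for i in range(len(array)):
+        for j in range(i + 1, len(array)):
+            if array[i] > array[j]:
+                count += 1
+    return count
+
+print(count_inversions([10, 6, 12, 8, 3, 1]))   # 12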

+
+ +
+

8.3.1. Relation between time complexity of insertion sort and inversion

+
+

+If the inversion count of an array is f(n), then the time complexity of insertion sort will be \(\theta (n + f(n))\). +
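+This can be checked empirically: the number of element shifts insertion sort performs equals the inversion count, so the running time is proportional to n plus the number of inversions. A small illustrative sketch (the function name is not from the lecture): +
+def insertion_sort_with_shifts(array):
+    """Insertion sort that also counts how many element shifts it performs."""
+    shifts = 0
+    for i in range(1, len(array)):
+        key = array[i]
+        j = i - 1
+        while j >= 0 and array[j] > key:
+            array[j + 1] = array[j]   # one shift per inversion removed
+            shifts += 1
+            j -= 1
+        array[j + 1] = key
+    return array, shifts
+
+print(insertion_sort_with_shifts([10, 6, 12, 8, 3, 1])[1])   # 12, the inversion count of this array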

+
+
+
+
+
+ + diff --git a/main.org b/main.org new file mode 100644 index 0000000..a8ea427 --- /dev/null +++ b/main.org @@ -0,0 +1,24 @@ +#+TITLE: Algorithms +#+AUTHOR: Anmol Nawani +#+SETUPFILE: ./org/theme-readtheorg-local.setup +# #+HTML_HEAD: +#+OPTIONS: html-postamble:nil + +# https://www.enjoyalgorithms.com/ : Great website + +* Lecture 1 + #+INCLUDE: "./lectures/1.org" +* Lecture 2 + #+INCLUDE: "./lectures/2.org" +* Lecture 3 + #+INCLUDE: "./lectures/3.org" +* Lecture 4 + #+INCLUDE: "./lectures/4.org" +* Lecture 5 + #+INCLUDE: "./lectures/5.org" +* Lecture 6 + #+INCLUDE: "./lectures/6.org" +* Lecture 7 + #+INCLUDE: "./lectures/7.org" +* Lecture 8 + #+INCLUDE: "./lectures/8.org" diff --git a/main.tsk b/main.tsk new file mode 100644 index 0000000..e7b4814 --- /dev/null +++ b/main.tsk @@ -0,0 +1,7 @@ +*Export to HTML +#do +emacs --script export.el + +*Remove Intermediate +#do +rm main.html~ \ No newline at end of file diff --git a/org/theme-bigblow-local.setup b/org/theme-bigblow-local.setup new file mode 100644 index 0000000..81fca7b --- /dev/null +++ b/org/theme-bigblow-local.setup @@ -0,0 +1,15 @@ +# -*- mode: org; -*- + +#+OPTIONS: html-style:nil +#+HTML_HEAD: +#+HTML_HEAD: +#+HTML_HEAD: + +#+HTML_HEAD: +#+HTML_HEAD: + +#+HTML_HEAD: +#+HTML_HEAD: +#+HTML_HEAD: +#+HTML_HEAD: +#+HTML_HEAD: diff --git a/org/theme-bigblow.setup b/org/theme-bigblow.setup new file mode 100644 index 0000000..9e2a3e8 --- /dev/null +++ b/org/theme-bigblow.setup @@ -0,0 +1,16 @@ +# -*- mode: org; -*- + +#+OPTIONS: html-style:nil +#+HTML_HEAD: +#+HTML_HEAD: +#+HTML_HEAD: + +#+HTML_HEAD: +#+HTML_HEAD: + +#+HTML_HEAD: +#+HTML_HEAD: +#+HTML_HEAD: +#+HTML_HEAD: +#+HTML_HEAD: +#+HTML_HEAD: diff --git a/org/theme-readtheorg-local.setup b/org/theme-readtheorg-local.setup new file mode 100644 index 0000000..2ca1deb --- /dev/null +++ b/org/theme-readtheorg-local.setup @@ -0,0 +1,10 @@ +# -*- mode: org; -*- + +#+OPTIONS: html-style:nil +#+HTML_HEAD: +#+HTML_HEAD: + +#+HTML_HEAD: +#+HTML_HEAD: +#+HTML_HEAD: +#+HTML_HEAD: diff --git a/org/theme-readtheorg.setup b/org/theme-readtheorg.setup new file mode 100644 index 0000000..3d01e6a --- /dev/null +++ b/org/theme-readtheorg.setup @@ -0,0 +1,10 @@ +# -*- mode: org; -*- + +#+OPTIONS: html-style:nil +#+HTML_HEAD: +#+HTML_HEAD: + +#+HTML_HEAD: +#+HTML_HEAD: +#+HTML_HEAD: +#+HTML_HEAD: diff --git a/src/bigblow_theme/css/bigblow.css b/src/bigblow_theme/css/bigblow.css new file mode 100644 index 0000000..0282348 --- /dev/null +++ b/src/bigblow_theme/css/bigblow.css @@ -0,0 +1,757 @@ +/* bigblow.css --- BigBlow CSS file */ + +/* Copyright (C) 2011-2014 All Right Reserved, Fabrice Niessen */ + +/* This file is free software: you can redistribute it and/or */ +/* modify it under the terms of the GNU General Public License as */ +/* published by the Free Software Foundation, either version 3 of */ +/* the License, or (at your option) any later version. */ + +/* This file is distributed in the hope that it will be useful, */ +/* but WITHOUT ANY WARRANTY; without even the implied warranty of */ +/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ +/* GNU General Public License for more details. 
*/ + +/* Author: Fabrice Niessen <(concat "fniessen" at-sign "pirilampo.org")> */ +/* URL: https://github.com/fniessen/org-html-themes/ */ +/* Version: 20140605.0925 */ + +html, body { + border: 0; + margin: 0; + padding: 0; +} + +@media print { + .dontprint { + display: none; + } +} + +#preamble { + background: none repeat scroll 0 0 #46484B; + color: #FFFFFF; + font-family: Arial,Helvetica,sans-serif; + font-size: 70%; + font-weight: bold; + height: 224px; /* so that content begins after... */ + padding: 2px 2px 0 6px; +} + +#banner { + text-align: center; +} + +#tabs { + background-color: #2061A2; + float: left; + margin: 0px 0px 20px 0px; + padding: 0; + width: 100%; +} + + #tabs ul { + margin: 0; + padding: 0; + } + + #tabs li { + float: left; + list-style-type: none; + margin: 0px 2px 0px 0px; + padding: 0px 0px 0px 0px; + white-space: nowrap; + } + + #tabs li a { + color: #EEEEEE; + display: block; + font-size: 13px; + font-weight: bold; + margin: 0; + padding: 4px 10px 4px 10px; + text-decoration: none; + } + + #tabs li a:hover { + background: #759FCF; + color: #FFFFFF; + } + + #tabs li.ui-tabs-active a { + background: #FFFFFF; + color: #555555; + } + +#content { + clear: both; + background-color: #FFFFFF; + font-size: 100%; + margin: 0 auto; + max-width: 810px; + overflow-x: hidden; + overflow-y: auto; + padding: 0px 10px 2px 10px; +} + +#postamble { + color: #999999; + font-family: Arial,Helvetica,sans-serif; + font-size: 70%; + height: 40px; + margin: 0 auto; + max-width: 810px; + padding-right: 30px; + padding-top: 22px; + padding: 2px 2px 0 6px; + text-align: right; +} + +body { + color: #333333; + font: 13px/1.385 arial,helvetica,clean,sans-serif; + margin: 0 .1em; + padding: 0; +} + +b { + color: #000000; +} + +i { + color: #1A1A1A; +} + +h1, ul#tabs, h2, h3, h4, h5 { + font-family: "Trebuchet MS",Verdana,sans-serif; +} + +h1 { + background-color: #0A3F69; + color: #F8F8F8; + font-size: 24px; + margin: 0; + padding: 9px 0px 0px 10px; +} + +h2 { + border-bottom: 4px solid #67B3E3; + color: #13679D; + font-size: 20px; +} + +h3, h4, h5, h6 { + color: #1572AE; +} + +h3 { + border-bottom: 1px solid #B5DAF1; + font-size: 16px; + margin-left: 25px; +} + +h4 { + border-bottom: 1px dotted #C9E3F5; + font-size: 14px; + margin-left: 60px; +} + +h5 { + font-size: 1em; + margin-left: 87px; +} + +h6 { + font-size: 1em; + margin-left: 100px; +} + +.DONEheader { + color: #ADADAD; + text-decoration: line-through; +} + + h3.DONEheader { + border-bottom: 1px solid #DDDDDD; + } + + h4.DONEheader { + border-bottom: 1px dotted #DDDDDD; + } + +.outline-text-2, .outline-text-3, .outline-text-4, .outline-text-5, +.outline-3 > ul, /* for HTML export of Beamer slides */ +.outline-4 > ol, #text-footnotes { + margin-left: 100px; +} + +li > .outline-text-5 { + margin-left: 20px; +} + +ul, ol { + padding-left: 1.5em; +} + +dt { + color: #1572AE; + font-weight: bold; +} + +dd { + margin-bottom: 6px; +} + +pre { + /* Use #EAEAEA for background-color of border with src code block's name */ + background-color: #F8F8FF; + border: 1px solid #DEDEDE; + color: #444444; + font-family: monospace; + line-height: 1.14em; + overflow: auto; + /* overflow-x: auto; */ + padding: 10px; +} + +code { + background-color: #F8F8FF; + border: 1px solid #DEDEDE; + color: #444444; + font-family: monospace; + /* font-size: 0.93em; */ + margin: 0px 1px; + padding: 0px 2px; +} + +li > p, li > ul, li > .inlinetask, li > dl { + margin-left: 0px; +} + +dd > p, dd > ul, dd > .inlinetask, dd > dl { + margin-left: 0px; +} + +li.checked { 
+ list-style-image: url('../images/checked.png'); +} + +li.halfchecked { + list-style-image: url('../images/halfchecked.png'); +} + +li.unchecked { + list-style-image: url('../images/unchecked.png'); +} + +a, a:link, a:visited { + color: #2061A2; + text-decoration: none; +} + +a:hover { + text-decoration: underline; +} + +a:focus { + outline: none; +} + +#search { + border-radius: 3px; + background: none repeat scroll 0 0 #FFFFFF; + display: block; + float: right; + height: 18px; + margin: 5px 10px 0 0; + overflow: hidden; + padding: 0 3px; + width: 188px; +} + + #search input { + border: 0 none; + color: #666666; + float: left; + font-family: Arial,Helvetica,sans-serif; + font-size: 11px; + font-weight: normal; + margin: 0; + padding: 2px 4px; + width: 160px; + } + + #search button { + background: url("../images/search-glass.png") no-repeat scroll 0 50% transparent; + border: 0 none; + cursor: pointer; + display: block; + float: right; + height: 18px; + margin: 0; + text-indent: -999em; + width: 14px; + } + +table { + border-collapse: collapse; + margin-right: auto; + margin-left: auto; +} + + table td { + padding: 3px 5px; + } + +table, th, td +{ + border: 1px solid #B5DAF1; + border-left: 2px solid white; + border-right: 2px solid white; +} + +th +{ + border-width: 1px 2px; + border-color: white; + background-color: #2061A2; + color: white; +} + +caption { + color: #8D8D84; +} + +img { + display: block; + margin-left: auto; + margin-right: auto; + text-align: center; +} + +.figure { + color: #8D8D84; + text-align: center; +} + +.fixme { + background: #FFFF88 url('../images/fixme.png') no-repeat top left; + color: #CC0000; + display: inline-block; + height: 16px; + text-indent: -9999px; + width: 82px; +} + +.left { + text-align: left; +} + +.right { + text-align: right; +} + +.center { + text-align: center; +} + +.justify { + text-align: justify; +} + +.inlinetask { + background-color: #F7F7F7; + border-collapse: separate; + border-color: #EEEEEE #EEEEEE #EEEEEE #1E90FF; + border-style: solid; + border-width: 1px 1px 1px 6px; + padding: 8px 8px 0px 8px; + margin: 10px 0px; +} + + .inlinetask td { + padding: 2px 5px 0px 2px; + border: 0px; + } + +.info { + border: 1px solid; + background: url('../images/info.png') no-repeat 10px 10px #BDE5F8; + color: #00529B; + padding: 4px 10px 4px 52px; + border-top-left-radius: 5px; + border-top-right-radius: 5px; + border-bottom-right-radius: 5px; + border-bottom-left-radius: 5px; + margin: 10px 0px; +} + +.tip { + border: 1px solid; + background: url('../images/tip.png') no-repeat 10px 10px #DFF2BF; + color: #4F8A10; + padding: 4px 10px 4px 52px; + border-top-left-radius: 5px; + border-top-right-radius: 5px; + border-bottom-right-radius: 5px; + border-bottom-left-radius: 5px; + margin: 10px 0px; +} + +.note { + border: 1px solid; + background: url('../images/note.png') no-repeat 10px 10px #FFFCCB; + color: #9F6000; + padding: 4px 10px 4px 52px; + border-top-left-radius: 5px; + border-top-right-radius: 5px; + border-bottom-right-radius: 5px; + border-bottom-left-radius: 5px; + margin: 10px 0px; +} + +.warning { + border: 1px solid; + background: url('../images/warning.png') no-repeat 10px 10px #FFBABA; + color: #D8000C; + padding: 4px 10px 4px 52px; + border-top-left-radius: 5px; + border-top-right-radius: 5px; + border-bottom-right-radius: 5px; + border-bottom-left-radius: 5px; + margin: 10px 0px; +} + +.todo, .done { + margin: 10px 0; + padding: 0px 2px; +} + +.NEW { + background-color: #FDFCD8; + border: 1px solid #EEE9C3; + color: #302B13; + 
font-weight: normal; +} + +.TODO { + background-color: #FED5D7; + border: 1px solid #FC5158; + color: #FC5158; +} + +.STRT, .STARTED { + background-color: #FEF2D4; + border: 1px solid #FDBF3D; + color: #FDBF3D; +} + +.WAIT, .WAITING, .DLGT, .DELEGATED { + background-color: #DFFFDF; + border: 1px solid #55BA80; + color: #55BA80; +} + +.SDAY, .SOMEDAY, .DFRD, .DEFERRED { + background-color: #D3EEFF; + border: 1px solid #42B5FF; + color: #42B5FF; +} + +.DONE, .CANX, .CANCELED { + background-color: #F2F2EE; + border: 1px solid #969696; + color: #969696; +} + +.tag span { + background-color: #EDEDED; + border: 1px solid #EDEDED; + color: #939393; + cursor: pointer; + display: block; + float: right; + font-size: 80%; + font-weight: normal; + margin: 0 3px; + padding: 1px 2px; + border-radius: 10px; +} + + #right-panel-contents .tag span { + font-size: 100%; + } + + .tag span:hover { + background: #BABDB6; + } + + .tag .FLAGGED { + background-color: #EDC6C8; + border: 1px solid #EDC6C8; + color: #C15F4E; + } + +.tag .selected { + background-color: #FFEBC1; + border: 1px solid #FDBF3B; + color: #A6750C; +} + +#listOfTags .tag span { + display: inline; + float: none; +} + +span.todo { + cursor: pointer; + /* display: block; */ + /* float: left; */ + margin: -1px 3px 0px 0px; +} + + span.todo:hover { + background: #BABDB6; + color: #888888; + } + +span.todo .selected { + background-color: #FFEBC1; + border-color: #FDBF3B; + color: #A6750C; +} + +.matchtag { + background-color: #FBFFC0; +} + +.matchNEW { + background-color: #FDFCD8; +} + +.matchTODO { + background-color: #FFE6E4; +} + +.matchSTRT { + background-color: #FEF2D4; +} + +.matchWAIT, .matchDLGT { + background-color: #DFFFDF; +} + +.matchSDAY, .matchDFRD { + background-color: #E0EFFF; +} + +#listOfTodo, #listOfDone, #listOfTags { + /* bottom: 10px; /\* together with this to put the div at the bottom*\/ */ + /* left: 10px; */ + /* list-style-type: none; */ + margin-left: 0px; + /* position: fixed; /\* this is the magic *\/ */ +} + +.timestamp-kwd { + background-color: #FFF1F1; + color: #880000; + margin: 0px 4px 0px 0px; + padding: 2px 0px 2px 2px; +} + +.timestamp { + color: #777777; + font-size: 80%; +} + +#table-of-contents { + background-color: #FFFFDD; + border: 1px solid #E4E4E4; + display: table; + line-height: 1.2em; + padding: 4px; + margin: 4px; + max-width: 400px; + float: right; + width: auto; +} + + #table-of-contents h2 { + display: none; + } + + #table-of-contents ul { + margin: 0; + padding: 0; + } + + #table-of-contents li { + list-style-type: none; + margin: 0; + } + + #table-of-contents li li { + margin-left: 1.5em; + } + + #table-of-contents li li li { + font-size: 0.8em; + } + + #table-of-contents a { + color: #606060; + font-size: 0.9em; + font-weight: normal; + text-decoration: none; + } + + #table-of-contents a:hover { + color: #C61A1A; + text-decoration: underline; + } + +#minitoc { + background-color: #FFFFDD; + border: 1px solid #E4E4E4; + color: #484848; + line-height: 1.2em; + margin: 12px; + padding: 4px; + position: fixed; + width: auto; + white-space: pre; +} + +#minitoc a { + display: block; + font-size: 10px; + font-weight: normal; +} + +#minitoc a { + display: none; +} + +#minitoc:hover a { + display: block; +} + +#minitoc h2 { + margin: 3px 0px; + border: none; + font-size: 11px; +} + +p.verse { + color: #808080; + font-style: italic; +} + +.example { + background-color: #DDFADE; + border: 1px solid #9EC49F; + color: #333333; +} + +.alert { + font-weight: bold; + color: #FF0000; +} + +#toTop { + 
background: #F7F7F7; + border: 1px solid #CCCCCC; + top: 10px; /* together with this to put the div at the top */ + color: #333333; + cursor: pointer; + display: none; + font-family: verdana; + font-size: 11px; + padding: 5px; + position: fixed; /* this is the magic */ + right: 10px; + text-align: center; + width: 100px; + z-index: 999; +} + + #toTop:hover { + text-decoration: underline; + } + +#left-panel-wrapper { + position: fixed; + z-index: 200; + /* display: none; /\* hide the panel if Javascript is not running *\/ */ +} + +#left-panel-contents { + background-color: #EFEFEF; + border-right: 1px dotted #ADADAD; + display: none; + height: 100%; + left: 0px; + position: fixed; + text-align: left; + top: 0; + width: 199px; + z-index: 200; + padding-top: 7px; + padding-left: 7px; +} + +#left-panel-button { + transform: rotate(90deg); + background-color: #EFEFEF; + border: 1px dotted #ADADAD; + border-bottom-width: 0px; + left: -23px; + position: fixed; + top: 50%; + z-index: 200; + padding: 2px 5px 5px 5px; +} + +#right-panel-wrapper { + position: fixed; + z-index: 200; + /* display: none; /\* hide the panel if Javascript is not running *\/ */ +} + +#right-panel-contents { + background-color: #EFEFEF; + border-left: 1px dotted #ADADAD; + display: none; + height: 100%; + right: 0px; + position: fixed; + text-align: left; + top: 0; + width: 199px; + z-index: 200; + padding-top: 7px; + padding-left: 7px; +} + +.org-src-container { + position: relative; +} + +.snippet-copy-to-clipboard { + display: none; + position: absolute; + right: 10px; + top: 5px; + font-size: 0.9em; + text-decoration:underline; +} + +.copy-to-clipboard-button:hover { + cursor: pointer; +} + +/* This is for the sake of Emacs. */ +/* Local Variables: */ +/* eval: (when (locate-library "rainbow-mode") (require 'rainbow-mode) (rainbow-mode)) */ +/* End: */ diff --git a/src/bigblow_theme/css/hideshow.css b/src/bigblow_theme/css/hideshow.css new file mode 100644 index 0000000..e0263c3 --- /dev/null +++ b/src/bigblow_theme/css/hideshow.css @@ -0,0 +1,116 @@ +/* hideshow.css --- HideShow CSS file */ + +/* Copyright (C) 2014 All Right Reserved, Fabrice Niessen */ + +/* This file is free software: you can redistribute it and/or */ +/* modify it under the terms of the GNU General Public License as */ +/* published by the Free Software Foundation, either version 3 of */ +/* the License, or (at your option) any later version. */ + +/* This file is distributed in the hope that it will be useful, */ +/* but WITHOUT ANY WARRANTY; without even the implied warranty of */ +/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ +/* GNU General Public License for more details. 
*/ + +/* Author: Fabrice Niessen <(concat "fniessen" at-sign "pirilampo.org")> */ +/* URL: https://github.com/fniessen/hide-show/ */ +/* Version: 20140912.1722 */ + +.hsExpanded.hsAnchor {background: #EE7700;} + +.buttons { + padding: 0px 7px 13px 0px; + background: #0A3F69; +} + +.hsButton { + color: white; + float: right; + font-size: 70%; + margin-left: 10px; +} + + .hsButton:hover { + background: #FBE448; + color: black; + cursor: pointer; + } + +.ellipsis { + color: #999999; + /* background-color: #FFF8C0; */ + /* float: right; */ + margin-left: 0.6em; +} + +.hsReview { + border: 1px solid #A4A4A4; + background-color: white; + z-index: 500; /* must be greater then z-index of hsOverlay */ + position: relative; /* required for z-index to work */ +} + +#hsOverlay { + width: 100%; + height: 100%; + position: fixed; + left: 0px; + top: 0px; + background-color: #000; + opacity: .70; + z-index: 250; /* must be greater than any other z-index (except the one for .hsReview */ +} + +.hsReviewPanel { + background-color: #757176; + color: white; + line-height: 1.1em; + margin: 10px 0px; + padding: 10px; + position: fixed; + width: auto; + bottom: 0px; + right: 0px; + z-index: 501; + text-align: center; +} + + .hsReviewPanel.hsReviewing { + display: none; + } + + .hsReviewPanel:hover { + cursor: pointer; + } + +.hsReviewingPanel { + background-color: white; + color: #757176; + line-height: 1.1em; + margin: 10px 0px; + padding: 10px; + position: fixed; + width: auto; + bottom: 0px; + right: 0px; + z-index: 501; + text-align: center; +} + +.hsReviewButton:hover { + cursor: pointer; +} + +.hsUnselectable { + -webkit-touch-callout: none; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} + +/* This is for the sake of Emacs. 
*/ +/* Local Variables: */ +/* eval: (when (locate-library "rainbow-mode") (require 'rainbow-mode) (rainbow-mode)) */ +/* End: */ diff --git a/src/bigblow_theme/css/htmlize.css b/src/bigblow_theme/css/htmlize.css new file mode 100644 index 0000000..f08e781 --- /dev/null +++ b/src/bigblow_theme/css/htmlize.css @@ -0,0 +1,145 @@ +.org-bold { /* bold */ font-weight: bold; } +.org-bold-italic { /* bold-italic */ font-weight: bold; font-style: italic; } +.org-buffer-menu-buffer { /* buffer-menu-buffer */ font-weight: bold; } +.org-builtin { /* font-lock-builtin-face */ color: #7a378b; } +.org-button { /* button */ text-decoration: underline; } +.org-calendar-today { /* calendar-today */ text-decoration: underline; } +.org-change-log-acknowledgement { /* change-log-acknowledgement */ color: #b22222; } +.org-change-log-conditionals { /* change-log-conditionals */ color: #a0522d; } +.org-change-log-date { /* change-log-date */ color: #8b2252; } +.org-change-log-email { /* change-log-email */ color: #a0522d; } +.org-change-log-file { /* change-log-file */ color: #0000ff; } +.org-change-log-function { /* change-log-function */ color: #a0522d; } +.org-change-log-list { /* change-log-list */ color: #a020f0; } +.org-change-log-name { /* change-log-name */ color: #008b8b; } +.org-comint-highlight-input { /* comint-highlight-input */ font-weight: bold; } +.org-comint-highlight-prompt { /* comint-highlight-prompt */ color: #00008b; } +.org-comment { /* font-lock-comment-face */ color: #b22222; } +.org-comment-delimiter { /* font-lock-comment-delimiter-face */ color: #b22222; } +.org-completions-annotations { /* completions-annotations */ font-style: italic; } +.org-completions-common-part { /* completions-common-part */ color: #000000; background-color: #ffffff; } +.org-completions-first-difference { /* completions-first-difference */ font-weight: bold; } +.org-constant { /* font-lock-constant-face */ color: #008b8b; } +.org-diary { /* diary */ color: #ff0000; } +.org-diff-context { /* diff-context */ color: #7f7f7f; } +.org-diff-file-header { /* diff-file-header */ background-color: #b3b3b3; font-weight: bold; } +.org-diff-function { /* diff-function */ background-color: #cccccc; } +.org-diff-header { /* diff-header */ background-color: #cccccc; } +.org-diff-hunk-header { /* diff-hunk-header */ background-color: #cccccc; } +.org-diff-index { /* diff-index */ background-color: #b3b3b3; font-weight: bold; } +.org-diff-nonexistent { /* diff-nonexistent */ background-color: #b3b3b3; font-weight: bold; } +.org-diff-refine-change { /* diff-refine-change */ background-color: #d9d9d9; } +.org-dired-directory { /* dired-directory */ color: #0000ff; } +.org-dired-flagged { /* dired-flagged */ color: #ff0000; font-weight: bold; } +.org-dired-header { /* dired-header */ color: #228b22; } +.org-dired-ignored { /* dired-ignored */ color: #7f7f7f; } +.org-dired-mark { /* dired-mark */ color: #008b8b; } +.org-dired-marked { /* dired-marked */ color: #ff0000; font-weight: bold; } +.org-dired-perm-write { /* dired-perm-write */ color: #b22222; } +.org-dired-symlink { /* dired-symlink */ color: #a020f0; } +.org-dired-warning { /* dired-warning */ color: #ff0000; font-weight: bold; } +.org-doc { /* font-lock-doc-face */ color: #8b2252; } +.org-escape-glyph { /* escape-glyph */ color: #a52a2a; } +.org-file-name-shadow { /* file-name-shadow */ color: #7f7f7f; } +.org-flyspell-duplicate { /* flyspell-duplicate */ color: #cdad00; font-weight: bold; text-decoration: underline; } +.org-flyspell-incorrect { /* flyspell-incorrect 
*/ color: #ff4500; font-weight: bold; text-decoration: underline; } +.org-fringe { /* fringe */ background-color: #f2f2f2; } +.org-function-name { /* font-lock-function-name-face */ color: #0000ff; } +.org-header-line { /* header-line */ color: #333333; background-color: #e5e5e5; } +.org-help-argument-name { /* help-argument-name */ font-style: italic; } +.org-highlight { /* highlight */ background-color: #b4eeb4; } +.org-holiday { /* holiday */ background-color: #ffc0cb; } +.org-isearch { /* isearch */ color: #b0e2ff; background-color: #cd00cd; } +.org-isearch-fail { /* isearch-fail */ background-color: #ffc1c1; } +.org-italic { /* italic */ font-style: italic; } +.org-keyword { /* font-lock-keyword-face */ color: #a020f0; } +.org-lazy-highlight { /* lazy-highlight */ background-color: #afeeee; } +.org-link { /* link */ color: #0000ff; text-decoration: underline; } +.org-link-visited { /* link-visited */ color: #8b008b; text-decoration: underline; } +.org-log-edit-header { /* log-edit-header */ color: #a020f0; } +.org-log-edit-summary { /* log-edit-summary */ color: #0000ff; } +.org-log-edit-unknown-header { /* log-edit-unknown-header */ color: #b22222; } +.org-match { /* match */ background-color: #ffff00; } +.org-next-error { /* next-error */ background-color: #eedc82; } +.org-nobreak-space { /* nobreak-space */ color: #a52a2a; text-decoration: underline; } +.org-org-archived { /* org-archived */ color: #7f7f7f; } +.org-org-block { /* org-block */ color: #7f7f7f; } +.org-org-block-begin-line { /* org-block-begin-line */ color: #b22222; } +.org-org-block-end-line { /* org-block-end-line */ color: #b22222; } +.org-org-checkbox { /* org-checkbox */ font-weight: bold; } +.org-org-checkbox-statistics-done { /* org-checkbox-statistics-done */ color: #228b22; font-weight: bold; } +.org-org-checkbox-statistics-todo { /* org-checkbox-statistics-todo */ color: #ff0000; font-weight: bold; } +.org-org-clock-overlay { /* org-clock-overlay */ background-color: #ffff00; } +.org-org-code { /* org-code */ color: #7f7f7f; } +.org-org-column { /* org-column */ background-color: #e5e5e5; } +.org-org-column-title { /* org-column-title */ background-color: #e5e5e5; font-weight: bold; text-decoration: underline; } +.org-org-date { /* org-date */ color: #a020f0; text-decoration: underline; } +.org-org-document-info { /* org-document-info */ color: #191970; } +.org-org-document-info-keyword { /* org-document-info-keyword */ color: #7f7f7f; } +.org-org-document-title { /* org-document-title */ color: #191970; font-size: 144%; font-weight: bold; } +.org-org-done { /* org-done */ color: #228b22; font-weight: bold; } +.org-org-drawer { /* org-drawer */ color: #0000ff; } +.org-org-ellipsis { /* org-ellipsis */ color: #b8860b; text-decoration: underline; } +.org-org-footnote { /* org-footnote */ color: #a020f0; text-decoration: underline; } +.org-org-formula { /* org-formula */ color: #b22222; } +.org-org-headline-done { /* org-headline-done */ color: #bc8f8f; } +.org-org-hide { /* org-hide */ color: #ffffff; } +.org-org-latex-and-export-specials { /* org-latex-and-export-specials */ color: #8b4513; } +.org-org-level-1 { /* org-level-1 */ color: #0000ff; } +.org-org-level-2 { /* org-level-2 */ color: #a0522d; } +.org-org-level-3 { /* org-level-3 */ color: #a020f0; } +.org-org-level-4 { /* org-level-4 */ color: #b22222; } +.org-org-level-5 { /* org-level-5 */ color: #228b22; } +.org-org-level-6 { /* org-level-6 */ color: #008b8b; } +.org-org-level-7 { /* org-level-7 */ color: #7a378b; } +.org-org-level-8 { /* 
org-level-8 */ color: #8b2252; } +.org-org-link { /* org-link */ color: #0000ff; text-decoration: underline; } +.org-org-meta-line { /* org-meta-line */ color: #b22222; } +.org-org-mode-line-clock { /* org-mode-line-clock */ color: #000000; background-color: #bfbfbf; } +.org-org-mode-line-clock-overrun { /* org-mode-line-clock-overrun */ color: #000000; background-color: #ff0000; } +.org-org-quote { /* org-quote */ color: #7f7f7f; } +.org-org-scheduled { /* org-scheduled */ color: #006400; } +.org-org-scheduled-previously { /* org-scheduled-previously */ color: #b22222; } +.org-org-scheduled-today { /* org-scheduled-today */ color: #006400; } +.org-org-sexp-date { /* org-sexp-date */ color: #a020f0; } +.org-org-special-keyword { /* org-special-keyword */ color: #a020f0; } +.org-org-table { /* org-table */ color: #0000ff; } +.org-org-tag { /* org-tag */ font-weight: bold; } +.org-org-target { /* org-target */ text-decoration: underline; } +.org-org-time-grid { /* org-time-grid */ color: #b8860b; } +.org-org-todo { /* org-todo */ color: #ff0000; font-weight: bold; } +.org-org-upcoming-deadline { /* org-upcoming-deadline */ color: #b22222; } +.org-org-verbatim { /* org-verbatim */ color: #7f7f7f; } +.org-org-verse { /* org-verse */ color: #7f7f7f; } +.org-org-warning { /* org-warning */ color: #ff0000; font-weight: bold; } +.org-outline-1 { /* outline-1 */ color: #0000ff; } +.org-outline-2 { /* outline-2 */ color: #a0522d; } +.org-outline-3 { /* outline-3 */ color: #a020f0; } +.org-outline-4 { /* outline-4 */ color: #b22222; } +.org-outline-5 { /* outline-5 */ color: #228b22; } +.org-outline-6 { /* outline-6 */ color: #008b8b; } +.org-outline-7 { /* outline-7 */ color: #7a378b; } +.org-outline-8 { /* outline-8 */ color: #8b2252; } +.org-preprocessor { /* font-lock-preprocessor-face */ color: #7a378b; } +.org-query-replace { /* query-replace */ color: #b0e2ff; background-color: #cd00cd; } +.org-regexp-grouping-backslash { /* font-lock-regexp-grouping-backslash */ font-weight: bold; } +.org-regexp-grouping-construct { /* font-lock-regexp-grouping-construct */ font-weight: bold; } +.org-region { /* region */ background-color: #eedc82; } +.org-secondary-selection { /* secondary-selection */ background-color: #ffff00; } +.org-shadow { /* shadow */ color: #7f7f7f; } +.org-show-paren-match { /* show-paren-match */ background-color: #40e0d0; } +.org-show-paren-mismatch { /* show-paren-mismatch */ color: #ffffff; background-color: #a020f0; } +.org-string { /* font-lock-string-face */ color: #8b2252; } +.org-tool-bar { /* tool-bar */ color: #000000; background-color: #bfbfbf; } +.org-tooltip { /* tooltip */ color: #000000; background-color: #ffffe0; } +.org-trailing-whitespace { /* trailing-whitespace */ background-color: #ff0000; } +.org-type { /* font-lock-type-face */ color: #228b22; } +.org-underline { /* underline */ text-decoration: underline; } +.org-variable-name { /* font-lock-variable-name-face */ color: #a0522d; } +.org-warning { /* font-lock-warning-face */ color: #ff0000; font-weight: bold; } +.org-widget-button { /* widget-button */ font-weight: bold; } +.org-widget-button-pressed { /* widget-button-pressed */ color: #ff0000; } +.org-widget-documentation { /* widget-documentation */ color: #006400; } +.org-widget-field { /* widget-field */ background-color: #d9d9d9; } +.org-widget-inactive { /* widget-inactive */ color: #7f7f7f; } +.org-widget-single-line-field { /* widget-single-line-field */ background-color: #d9d9d9; } diff --git a/src/bigblow_theme/images/checked.png 
b/src/bigblow_theme/images/checked.png new file mode 100644 index 0000000..ebdc03b Binary files /dev/null and b/src/bigblow_theme/images/checked.png differ diff --git a/src/bigblow_theme/images/fixme.png b/src/bigblow_theme/images/fixme.png new file mode 100644 index 0000000..5b861d8 Binary files /dev/null and b/src/bigblow_theme/images/fixme.png differ diff --git a/src/bigblow_theme/images/halfchecked.png b/src/bigblow_theme/images/halfchecked.png new file mode 100644 index 0000000..afd695e Binary files /dev/null and b/src/bigblow_theme/images/halfchecked.png differ diff --git a/src/bigblow_theme/images/info.png b/src/bigblow_theme/images/info.png new file mode 100644 index 0000000..83de654 Binary files /dev/null and b/src/bigblow_theme/images/info.png differ diff --git a/src/bigblow_theme/images/note.png b/src/bigblow_theme/images/note.png new file mode 100644 index 0000000..1c6b8eb Binary files /dev/null and b/src/bigblow_theme/images/note.png differ diff --git a/src/bigblow_theme/images/tip.png b/src/bigblow_theme/images/tip.png new file mode 100644 index 0000000..743ef89 Binary files /dev/null and b/src/bigblow_theme/images/tip.png differ diff --git a/src/bigblow_theme/images/unchecked.png b/src/bigblow_theme/images/unchecked.png new file mode 100644 index 0000000..b75b7a9 Binary files /dev/null and b/src/bigblow_theme/images/unchecked.png differ diff --git a/src/bigblow_theme/images/warning.png b/src/bigblow_theme/images/warning.png new file mode 100644 index 0000000..296415e Binary files /dev/null and b/src/bigblow_theme/images/warning.png differ diff --git a/src/bigblow_theme/js/ZeroClipboard.swf b/src/bigblow_theme/js/ZeroClipboard.swf new file mode 100644 index 0000000..13bf8e3 Binary files /dev/null and b/src/bigblow_theme/js/ZeroClipboard.swf differ diff --git a/src/bigblow_theme/js/bigblow.js b/src/bigblow_theme/js/bigblow.js new file mode 100644 index 0000000..6ee48da --- /dev/null +++ b/src/bigblow_theme/js/bigblow.js @@ -0,0 +1,525 @@ +// bigblow.js --- BigBlow JS file +// +// Copyright (C) 2011-2016 All Right Reserved, Fabrice Niessen +// +// This file is free software: you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation, either version 3 of +// the License, or (at your option) any later version. +// +// This file is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// Author: Fabrice Niessen <(concat "fniessen" at-sign "pirilampo.org")> +// URL: https://github.com/fniessen/org-html-themes/ +// Version: 20140515.1841 + +$(function() { + $('p'). + html(function(index, old) { + return old.replace('FIXME', + 'FIXME'); + }); + $('p'). + html(function(index, old) { + return old.replace('XXX', + 'XXX'); + }); +}); + +// Remove leading section number +$(function() { + $('.section-number-2').text(""); + for (var i = 3; i <= 5; i++) { + $('.section-number-' + i).each(function() { + $(this).text($(this).text().replace(/^[0-9]+\./g, "")); + }); + } +}); + +$(function() { + $('
').prependTo('body'); +}); + +// generate contents of minitoc +function generateMiniToc(divId) { + let headers = null; + if(divId) { + $('#minitoc').empty().append('

In this section

'); + headers = $('#' + divId).find('h3'); + } + else { + $('#minitoc').empty().append('

In this document

'); + headers = $('div#content').find(':header'); + } + headers.each(function(i) { + let text = $(this) + .clone() //clone the element + .children() //select all the children + .remove() //remove all the children + .end() //again go back to selected element + .text().trim(); + var level = parseInt(this.nodeName.substring(1), 10); + let prefix = "".padStart(level-1, " "); + $("#minitoc").append("" + + prefix + text + ""); + }); + // Ensure that the target is expanded (hideShow) + $('#minitoc a[href^="#"]').click(function() { + var href = $(this).attr('href'); + hsExpandAnchor(href); + }); +} + +// display tabs +function tabifySections() { + + // hide TOC (if present) + $('#table-of-contents').hide(); + + // grab the list of `h2' from the page + var allSections = []; + $('h2') + .each(function() { + // Remove TODO keywords and tags (contained in spans) + var tabText = $(this).clone().find('span').remove().end() + .text().trim(); + var tabId = $(this).parent().attr('id'); + if (tabText) { + // - remove heading number (all leading digits) + // - remove progress logging (between square brackets) + // - remove leading and trailing spaces + tabText = tabText.replace(/^\d+\s+/, '').replace(/\[[\d/%]+\]/, '').trim(); + + allSections.push({ + text: tabText, + id: tabId + }); + } + }); + + // create the tab links + var tabs = $('
    '); + for (i = 0; i < allSections.length; i++) { + var item = allSections[i]; + html = $('
  • ' + item.text + '
  • '); + tabs.append(html); + } + + // insert tabs menu after title (`h1'), or at the beginning of the content + if($('.title').length !== 0) { + $('.title').after(tabs); + } + else { + $('#content').prepend(tabs); + } +} + +function selectTabAndScroll(href) { + // At this point we assume that href is local (starts with #) + // alert(href); + + // Find the tab to activate + var targetTab = $(href).closest('.ui-tabs-panel'); + var targetTabId = targetTab.attr('id'); + var targetTabAriaLabel = targetTab.attr('aria-labelledby'); + + var targetTabIndex = $("#content ul li") + .index($('[aria-labelledby="' + targetTabAriaLabel + '"]')); + + // Activate target tab + $('#content').tabs('option', 'active', targetTabIndex); + + // Rebuild minitoc + generateMiniToc(targetTabId); + + // Set the location hash + // document.location.hash = href; + + // Scroll to top if href was a tab + if (href == '#' + targetTabId) { + // alert(targetTabId); + $.scrollTo(0); + } + // Scroll to href if href was not a tab + else { + $.scrollTo(href); + } +} + +$(document).ready(function() { + $('#preamble').remove(); + $('#table-of-contents').remove(); + + // Prepare for tabs + tabifySections(); + + // Build the tabs from the #content div + $('#content').tabs(); + + // Set default animation + $('#content').tabs('option', 'show', true); + + // Rebuild minitoc when a tab is activated + $('#content').tabs({ + activate: function(event, ui) { + var divId = ui.newTab.attr('aria-controls'); + generateMiniToc(divId); + } + }); + + // Required to get the link of the tab in URL + $('#content ul').localScroll({ + target: '#content', + duration: 0, + hash: true + }); + + // Handle hash in URL + if ($('#content') && document.location.hash) { + hsExpandAnchor(document.location.hash); + selectTabAndScroll(document.location.hash); + } + // If no hash, build the minitoc anyway for selected tab + else { + var divId = $('#content div[aria-expanded=true]').attr('id'); + generateMiniToc(divId); + } + + // Handle click on internal links + $('.ui-tabs-panel a[href^="#"]').click(function(e) { + var href = $(this).attr('href'); + hsExpandAnchor(href); + selectTabAndScroll(href); + e.preventDefault(); + }); + + // Initialize hideShow + hsInit(); + + // add sticky headers to tables + $('table').stickyTableHeaders(); +}); + +$(document).ready(function() { + // Add copy to clipboard snippets + $('.org-src-container').prepend('
    [copy]
    '); + + // Display/hide snippets on source block mouseenter/mouseleave + $(document).on('mouseenter', '.org-src-container', function () { + $(this).find('.snippet-copy-to-clipboard').show(); + }); + $(document).on('mouseleave', '.org-src-container', function () { + $(this).find('.snippet-copy-to-clipboard').hide(); + }); + + $('.copy-to-clipboard-button').click( function() { + var element = $(this).parent().parent().find('.src'); + var val = element.text(); + val = val.replace(/\n/g, "\r\n"); + + var $copyElement = $("",l.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue,a.appendChild(b),b.innerHTML="",l.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,l.noCloneEvent=!0,b.attachEvent&&(b.attachEvent("onclick",function(){l.noCloneEvent=!1}),b.cloneNode(!0).click()),null==l.deleteExpando){l.deleteExpando=!0;try{delete b.test}catch(d){l.deleteExpando=!1}}a=b=c=null}(),function(){var b,c,d=z.createElement("div");for(b in{submit:!0,change:!0,focusin:!0})c="on"+b,(l[b+"Bubbles"]=c in a)||(d.setAttribute(c,"t"),l[b+"Bubbles"]=d.attributes[c].expando===!1);d=null}();var Y=/^(?:input|select|textarea)$/i,Z=/^key/,$=/^(?:mouse|contextmenu)|click/,_=/^(?:focusinfocus|focusoutblur)$/,ab=/^([^.]*)(?:\.(.+)|)$/;function bb(){return!0}function cb(){return!1}function db(){try{return z.activeElement}catch(a){}}n.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=n._data(a);if(r){c.handler&&(i=c,c=i.handler,e=i.selector),c.guid||(c.guid=n.guid++),(g=r.events)||(g=r.events={}),(k=r.handle)||(k=r.handle=function(a){return typeof n===L||a&&n.event.triggered===a.type?void 0:n.event.dispatch.apply(k.elem,arguments)},k.elem=a),b=(b||"").match(F)||[""],h=b.length;while(h--)f=ab.exec(b[h])||[],o=q=f[1],p=(f[2]||"").split(".").sort(),o&&(j=n.event.special[o]||{},o=(e?j.delegateType:j.bindType)||o,j=n.event.special[o]||{},l=n.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&n.expr.match.needsContext.test(e),namespace:p.join(".")},i),(m=g[o])||(m=g[o]=[],m.delegateCount=0,j.setup&&j.setup.call(a,d,p,k)!==!1||(a.addEventListener?a.addEventListener(o,k,!1):a.attachEvent&&a.attachEvent("on"+o,k))),j.add&&(j.add.call(a,l),l.handler.guid||(l.handler.guid=c.guid)),e?m.splice(m.delegateCount++,0,l):m.push(l),n.event.global[o]=!0);a=null}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=n.hasData(a)&&n._data(a);if(r&&(k=r.events)){b=(b||"").match(F)||[""],j=b.length;while(j--)if(h=ab.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=n.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,m=k[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),i=f=m.length;while(f--)g=m[f],!e&&q!==g.origType||c&&c.guid!==g.guid||h&&!h.test(g.namespace)||d&&d!==g.selector&&("**"!==d||!g.selector)||(m.splice(f,1),g.selector&&m.delegateCount--,l.remove&&l.remove.call(a,g));i&&!m.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||n.removeEvent(a,o,r.handle),delete k[o])}else for(o in k)n.event.remove(a,o+b[j],c,d,!0);n.isEmptyObject(k)&&(delete r.handle,n._removeData(a,"events"))}},trigger:function(b,c,d,e){var f,g,h,i,k,l,m,o=[d||z],p=j.call(b,"type")?b.type:b,q=j.call(b,"namespace")?b.namespace.split("."):[];if(h=l=d=d||z,3!==d.nodeType&&8!==d.nodeType&&!_.test(p+n.event.triggered)&&(p.indexOf(".")>=0&&(q=p.split("."),p=q.shift(),q.sort()),g=p.indexOf(":")<0&&"on"+p,b=b[n.expando]?b:new n.Event(p,"object"==typeof b&&b),b.isTrigger=e?2:3,b.namespace=q.join("."),b.namespace_re=b.namespace?new 
RegExp("(^|\\.)"+q.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 0,b.target||(b.target=d),c=null==c?[b]:n.makeArray(c,[b]),k=n.event.special[p]||{},e||!k.trigger||k.trigger.apply(d,c)!==!1)){if(!e&&!k.noBubble&&!n.isWindow(d)){for(i=k.delegateType||p,_.test(i+p)||(h=h.parentNode);h;h=h.parentNode)o.push(h),l=h;l===(d.ownerDocument||z)&&o.push(l.defaultView||l.parentWindow||a)}m=0;while((h=o[m++])&&!b.isPropagationStopped())b.type=m>1?i:k.bindType||p,f=(n._data(h,"events")||{})[b.type]&&n._data(h,"handle"),f&&f.apply(h,c),f=g&&h[g],f&&f.apply&&n.acceptData(h)&&(b.result=f.apply(h,c),b.result===!1&&b.preventDefault());if(b.type=p,!e&&!b.isDefaultPrevented()&&(!k._default||k._default.apply(o.pop(),c)===!1)&&n.acceptData(d)&&g&&d[p]&&!n.isWindow(d)){l=d[g],l&&(d[g]=null),n.event.triggered=p;try{d[p]()}catch(r){}n.event.triggered=void 0,l&&(d[g]=l)}return b.result}},dispatch:function(a){a=n.event.fix(a);var b,c,e,f,g,h=[],i=d.call(arguments),j=(n._data(this,"events")||{})[a.type]||[],k=n.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=n.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,g=0;while((e=f.handlers[g++])&&!a.isImmediatePropagationStopped())(!a.namespace_re||a.namespace_re.test(e.namespace))&&(a.handleObj=e,a.data=e.data,c=((n.event.special[e.origType]||{}).handle||e.handler).apply(f.elem,i),void 0!==c&&(a.result=c)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(!a.button||"click"!==a.type))for(;i!=this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||"click"!==a.type)){for(e=[],f=0;h>f;f++)d=b[f],c=d.selector+" ",void 0===e[c]&&(e[c]=d.needsContext?n(c,this).index(i)>=0:n.find(c,this,null,[i]).length),e[c]&&e.push(d);e.length&&g.push({elem:i,handlers:e})}return h]","i"),ib=/^\s+/,jb=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,kb=/<([\w:]+)/,lb=/\s*$/g,sb={option:[1,""],legend:[1,"
    ","
    "],area:[1,"",""],param:[1,"",""],thead:[1,"","
    "],tr:[2,"","
    "],col:[2,"","
    "],td:[3,"","
    "],_default:l.htmlSerialize?[0,"",""]:[1,"X
    ","
    "]},tb=eb(z),ub=tb.appendChild(z.createElement("div"));sb.optgroup=sb.option,sb.tbody=sb.tfoot=sb.colgroup=sb.caption=sb.thead,sb.th=sb.td;function vb(a,b){var c,d,e=0,f=typeof a.getElementsByTagName!==L?a.getElementsByTagName(b||"*"):typeof a.querySelectorAll!==L?a.querySelectorAll(b||"*"):void 0;if(!f)for(f=[],c=a.childNodes||a;null!=(d=c[e]);e++)!b||n.nodeName(d,b)?f.push(d):n.merge(f,vb(d,b));return void 0===b||b&&n.nodeName(a,b)?n.merge([a],f):f}function wb(a){X.test(a.type)&&(a.defaultChecked=a.checked)}function xb(a,b){return n.nodeName(a,"table")&&n.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function yb(a){return a.type=(null!==n.find.attr(a,"type"))+"/"+a.type,a}function zb(a){var b=qb.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function Ab(a,b){for(var c,d=0;null!=(c=a[d]);d++)n._data(c,"globalEval",!b||n._data(b[d],"globalEval"))}function Bb(a,b){if(1===b.nodeType&&n.hasData(a)){var c,d,e,f=n._data(a),g=n._data(b,f),h=f.events;if(h){delete g.handle,g.events={};for(c in h)for(d=0,e=h[c].length;e>d;d++)n.event.add(b,c,h[c][d])}g.data&&(g.data=n.extend({},g.data))}}function Cb(a,b){var c,d,e;if(1===b.nodeType){if(c=b.nodeName.toLowerCase(),!l.noCloneEvent&&b[n.expando]){e=n._data(b);for(d in e.events)n.removeEvent(b,d,e.handle);b.removeAttribute(n.expando)}"script"===c&&b.text!==a.text?(yb(b).text=a.text,zb(b)):"object"===c?(b.parentNode&&(b.outerHTML=a.outerHTML),l.html5Clone&&a.innerHTML&&!n.trim(b.innerHTML)&&(b.innerHTML=a.innerHTML)):"input"===c&&X.test(a.type)?(b.defaultChecked=b.checked=a.checked,b.value!==a.value&&(b.value=a.value)):"option"===c?b.defaultSelected=b.selected=a.defaultSelected:("input"===c||"textarea"===c)&&(b.defaultValue=a.defaultValue)}}n.extend({clone:function(a,b,c){var d,e,f,g,h,i=n.contains(a.ownerDocument,a);if(l.html5Clone||n.isXMLDoc(a)||!hb.test("<"+a.nodeName+">")?f=a.cloneNode(!0):(ub.innerHTML=a.outerHTML,ub.removeChild(f=ub.firstChild)),!(l.noCloneEvent&&l.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||n.isXMLDoc(a)))for(d=vb(f),h=vb(a),g=0;null!=(e=h[g]);++g)d[g]&&Cb(e,d[g]);if(b)if(c)for(h=h||vb(a),d=d||vb(f),g=0;null!=(e=h[g]);g++)Bb(e,d[g]);else Bb(a,f);return d=vb(f,"script"),d.length>0&&Ab(d,!i&&vb(a,"script")),d=h=e=null,f},buildFragment:function(a,b,c,d){for(var e,f,g,h,i,j,k,m=a.length,o=eb(b),p=[],q=0;m>q;q++)if(f=a[q],f||0===f)if("object"===n.type(f))n.merge(p,f.nodeType?[f]:f);else if(mb.test(f)){h=h||o.appendChild(b.createElement("div")),i=(kb.exec(f)||["",""])[1].toLowerCase(),k=sb[i]||sb._default,h.innerHTML=k[1]+f.replace(jb,"<$1>")+k[2],e=k[0];while(e--)h=h.lastChild;if(!l.leadingWhitespace&&ib.test(f)&&p.push(b.createTextNode(ib.exec(f)[0])),!l.tbody){f="table"!==i||lb.test(f)?""!==k[1]||lb.test(f)?0:h:h.firstChild,e=f&&f.childNodes.length;while(e--)n.nodeName(j=f.childNodes[e],"tbody")&&!j.childNodes.length&&f.removeChild(j)}n.merge(p,h.childNodes),h.textContent="";while(h.firstChild)h.removeChild(h.firstChild);h=o.lastChild}else p.push(b.createTextNode(f));h&&o.removeChild(h),l.appendChecked||n.grep(vb(p,"input"),wb),q=0;while(f=p[q++])if((!d||-1===n.inArray(f,d))&&(g=n.contains(f.ownerDocument,f),h=vb(o.appendChild(f),"script"),g&&Ab(h),c)){e=0;while(f=h[e++])pb.test(f.type||"")&&c.push(f)}return h=null,o},cleanData:function(a,b){for(var d,e,f,g,h=0,i=n.expando,j=n.cache,k=l.deleteExpando,m=n.event.special;null!=(d=a[h]);h++)if((b||n.acceptData(d))&&(f=d[i],g=f&&j[f])){if(g.events)for(e in 
g.events)m[e]?n.event.remove(d,e):n.removeEvent(d,e,g.handle);j[f]&&(delete j[f],k?delete d[i]:typeof d.removeAttribute!==L?d.removeAttribute(i):d[i]=null,c.push(f))}}}),n.fn.extend({text:function(a){return W(this,function(a){return void 0===a?n.text(this):this.empty().append((this[0]&&this[0].ownerDocument||z).createTextNode(a))},null,a,arguments.length)},append:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=xb(this,a);b.appendChild(a)}})},prepend:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=xb(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},remove:function(a,b){for(var c,d=a?n.filter(a,this):this,e=0;null!=(c=d[e]);e++)b||1!==c.nodeType||n.cleanData(vb(c)),c.parentNode&&(b&&n.contains(c.ownerDocument,c)&&Ab(vb(c,"script")),c.parentNode.removeChild(c));return this},empty:function(){for(var a,b=0;null!=(a=this[b]);b++){1===a.nodeType&&n.cleanData(vb(a,!1));while(a.firstChild)a.removeChild(a.firstChild);a.options&&n.nodeName(a,"select")&&(a.options.length=0)}return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return n.clone(this,a,b)})},html:function(a){return W(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a)return 1===b.nodeType?b.innerHTML.replace(gb,""):void 0;if(!("string"!=typeof a||nb.test(a)||!l.htmlSerialize&&hb.test(a)||!l.leadingWhitespace&&ib.test(a)||sb[(kb.exec(a)||["",""])[1].toLowerCase()])){a=a.replace(jb,"<$1>");try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(n.cleanData(vb(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=arguments[0];return this.domManip(arguments,function(b){a=this.parentNode,n.cleanData(vb(this)),a&&a.replaceChild(b,this)}),a&&(a.length||a.nodeType)?this:this.remove()},detach:function(a){return this.remove(a,!0)},domManip:function(a,b){a=e.apply([],a);var c,d,f,g,h,i,j=0,k=this.length,m=this,o=k-1,p=a[0],q=n.isFunction(p);if(q||k>1&&"string"==typeof p&&!l.checkClone&&ob.test(p))return this.each(function(c){var d=m.eq(c);q&&(a[0]=p.call(this,c,d.html())),d.domManip(a,b)});if(k&&(i=n.buildFragment(a,this[0].ownerDocument,!1,this),c=i.firstChild,1===i.childNodes.length&&(i=c),c)){for(g=n.map(vb(i,"script"),yb),f=g.length;k>j;j++)d=i,j!==o&&(d=n.clone(d,!0,!0),f&&n.merge(g,vb(d,"script"))),b.call(this[j],d,j);if(f)for(h=g[g.length-1].ownerDocument,n.map(g,zb),j=0;f>j;j++)d=g[j],pb.test(d.type||"")&&!n._data(d,"globalEval")&&n.contains(h,d)&&(d.src?n._evalUrl&&n._evalUrl(d.src):n.globalEval((d.text||d.textContent||d.innerHTML||"").replace(rb,"")));i=c=null}return this}}),n.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){n.fn[a]=function(a){for(var c,d=0,e=[],g=n(a),h=g.length-1;h>=d;d++)c=d===h?this:this.clone(!0),n(g[d])[b](c),f.apply(e,c.get());return this.pushStack(e)}});var Db,Eb={};function Fb(b,c){var d=n(c.createElement(b)).appendTo(c.body),e=a.getDefaultComputedStyle?a.getDefaultComputedStyle(d[0]).display:n.css(d[0],"display");return d.detach(),e}function Gb(a){var b=z,c=Eb[a];return c||(c=Fb(a,b),"none"!==c&&c||(Db=(Db||n("