From 35a77a52ed9548c0a93088303a01264d9b1656b2 Mon Sep 17 00:00:00 2001 From: lomna Date: Sat, 1 Jul 2023 21:32:02 +0530 Subject: [PATCH] Second commit --- lectures/3.org | 8 +- lectures/6.org | 44 ++-- lectures/8.org | 61 ++++- main.html | 600 +++++++++++++++++++++++++++++-------------------- 4 files changed, 444 insertions(+), 269 deletions(-) diff --git a/lectures/3.org b/lectures/3.org index 2c02475..963c45d 100644 --- a/lectures/3.org +++ b/lectures/3.org @@ -20,7 +20,7 @@ A set of instructions in a loop. Iterative instructions can have different compl \\ + If *inner loop iterator doesn't depend on outer loop*, the complexity of the inner loop is multiplied by the number of times outer loop runs to get the time complexity For example, suppose we have loop as -#+BEGIN_SRC +#+BEGIN_SRC C for(int i = 0; i < n; i++){ ... for(int j = 0; j < n; j *= 2){ @@ -35,7 +35,7 @@ Thus the time complexity is *O(n.log(n))*. + If *inner loop and outer loop are related*, then complexities have to be computed using sums. Example, we have loop -#+BEGIN_SRC +#+BEGIN_SRC C for(int i = 0; i <= n; i++){ ... for(int j = 0; j <= i; j++){ @@ -70,7 +70,7 @@ total number of times inner loop runs = $\frac{n^2}{2} + \frac{n}{2}$ *Another example,* \\ Suppose we have loop -#+BEGIN_SRC +#+BEGIN_SRC C for(int i = 1; i <= n; i++){ ... for(int j = 1; j <= i; j *= 2){ @@ -108,7 +108,7 @@ Time complexity = $O(n.log(n))$ ** An example for time complexities of nested loops Suppose a loop, -#+BEGIN_SRC +#+BEGIN_SRC C for(int i = 1; i <= n; i *= 2){ ... for(int j = 1; j <= i; j *= 2){ diff --git a/lectures/6.org b/lectures/6.org index e70a236..9921388 100644 --- a/lectures/6.org +++ b/lectures/6.org @@ -27,15 +27,16 @@ Divide and conquer is a problem solving strategy. In divide and conquer algorith Recursive approach -#+BEGIN_SRC python - # call this function with index = 0 - def linear_search(array, item, index): - if len(array) < 1: - return -1 - elif array[index] == item: - return index - else: - return linear_search(array, item, index + 1) +#+BEGIN_SRC C + // call this function with index = 0; n is the number of elements in array + int linear_search(int array[], int n, int item, int index){ + if( index >= n ) + return -1; + else if (array[index] == item) + return index; + else + return linear_search(array, n, item, index + 1); + } #+END_SRC *Recursive time complexity* : $T(n) = T(n-1) + 1$ @@ -138,16 +139,21 @@ Recursive approach: * Max and Min element from array ** Straightforward approach -#+BEGIN_SRC python - def min_max(a): - max = min = a[1] - for i in range(2, n): - if a[i] > max: - max = a[i]; - elif a[i] < min: - min = a[i]; - - return (min,max) +#+BEGIN_SRC C + struct min_max {int min; int max;}; + struct min_max min_max(int array[], int n){ + int max = array[0]; + int min = array[0]; + + for(int i = 1; i < n; i++){ + if(array[i] > max) + max = array[i]; + else if(array[i] < min) + min = array[i]; + } + + return (struct min_max) {min, max}; + } #+END_SRC + *Best case* : array is sorted in ascending order. Number of comparisons is $n-1$. Time complexity is $O(n)$. diff --git a/lectures/8.org b/lectures/8.org index 4d5a0be..5745186 100644 --- a/lectures/8.org +++ b/lectures/8.org @@ -15,6 +15,8 @@ It is an inplace sorting technique. In this algorithm, we will get the minimum e } #+END_SRC +** Time complexity + The total number of comparisons is, \[ \text{Total number of comparisons} = (n -1) + (n-2) + (n-3) + ... + (1) \] \[ \text{Total number of comparisons} = \frac{n(n-1)}{2} \] @@ -48,7 +50,7 @@ It is an inplace sorting algorithm.
} #+END_SRC -+ Time complexity +** Time complexity *Best Case* : The best case is when the input array is already sorted. In this case, we do *(n-1)* comparisons and no swaps. The time complexity will be $\theta (n)$ \\ @@ -90,4 +92,59 @@ Total number of inversions = 1 + 2 = 3 If the inversion of an array is f(n), then the time complexity of the insertion sort will be $\theta (n + f(n))$. * Quick sort -It is a divide and conquer technique. +It is a divide and conquer technique. It uses a partition algorithm which chooses an element from the array, then places all smaller elements to its left and all larger elements to its right. We can then take these two parts of the array and recursively place all elements in their correct positions. For simplicity, the element chosen by the partition algorithm is either the leftmost or the rightmost element. + +#+BEGIN_SRC C + void quick_sort(int array[], int low, int high){ + if(low < high){ + int x = partition(array, low, high); + quick_sort(array, low, x-1); + quick_sort(array, x+1, high); + } + } +#+END_SRC + +As we can see, the main component of this algorithm is the partition algorithm. + +** Lomuto partition +The partition algorithm will work as follows: + +#+BEGIN_SRC C + /* Will return the index where the array is partitioned */ + int partition(int array[], int low, int high){ + int pivot = array[high]; + /* i tracks the last position holding an element less than or equal to the pivot */ + int i = low - 1; + + for(int j = low; j < high; j++){ + if(array[j] <= pivot){ + i += 1; + int temp = array[i]; + array[i] = array[j]; + array[j] = temp; + } + } + + int temp = array[i+1]; + array[i+1] = array[high]; + array[high] = temp; + return (i + 1); + } #+END_SRC + ++ Time complexity For an array of size *n*, the number of comparisons done by this algorithm is always *n - 1*. Therefore, the time complexity of this partition algorithm is, \[ T(n) = \theta (n) \] + +** Time complexity of quicksort ++ *Best Case* : The partition algorithm always divides the array into two equal parts. In this case, the recurrence relation becomes + \[ T(n) = 2T(n/2) + \theta (n) \] + Where, $\theta (n)$ is the time complexity for creating the partition. + \\ + Using the master's theorem. + \[ T(n) = \theta( n.log(n) ) \] + ++ *Worst Case* : The partition algorithm always creates the partition at one of the extreme positions of the array. This creates a single partition with *n-1* elements. Therefore, the quicksort algorithm has to be called on the remaining *n-1* elements of the array. + \[ T(n) = T(n-1) + \theta (n) \] + Again, $\theta (n)$ is the time complexity for creating the partition. + \\ + Using master's theorem + \[ T(n) = \theta (n^2) \] + ++ *Average Case* : On average, the partition is balanced enough that the behaviour matches the best case, giving + \[ T(n) = \theta (n.log(n)) \] diff --git a/main.html b/main.html index 493f30a..2068476 100644 --- a/main.html +++ b/main.html @@ -3,7 +3,7 @@ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> - + Algorithms @@ -44,149 +44,163 @@

Table of Contents

-
-

1. Lecture 1

+
+

1. Lecture 1

-
-

1.1. Data structure and Algorithm

+
+

1.1. Data structure and Algorithm

• A data structure is a particular way of storing and organizing data. The purpose is to access and modify data effectively.
  • @@ -213,8 +227,8 @@ During programming we use data structures and algorithms that work on that data.
-
-

1.2. Characteristics of Algorithms

+
+

1.2. Characteristics of Algorithms

An algorithm has the following characteristics. @@ -230,8 +244,8 @@ An algorithm has follwing characteristics.

-
-

1.3. Behaviour of algorithm

+
+

1.3. Behaviour of algorithm

The behaviour of an algorithm is the analysis of the algorithm on the basis of Time and Space. @@ -248,8 +262,8 @@ The preference is traditionally/usually given to better time complexity. But we

-
-

1.3.1. Best, Worst and Average Cases

+
+

1.3.1. Best, Worst and Average Cases

The input size tells us the size of the input given to the algorithm. Based on the size of the input, the time/storage usage of the algorithm changes. For example, an array with a larger input size (more elements) will take more time to sort. @@ -262,8 +276,8 @@

-
-

1.3.2. Bounds of algorithm

+
+

1.3.2. Bounds of algorithm

Since algorithms are finite, the time and space they take are bounded, i.e., they have a minimum and a maximum. These bounds are the lower bound and the upper bound. @@ -276,12 +290,12 @@ Since algorithms are finite, they have bounded time taken and bounded

-
-

1.4. Asymptotic Notations

+
+

1.4. Asymptotic Notations

-
-

1.4.1. Big-Oh Notation [O]

+
+

1.4.1. Big-Oh Notation [O]

  • The Big Oh notation is used to define the upper bound of an algorithm.
  • @@ -292,16 +306,16 @@ Since algorithms are finite, they have bounded time taken and bounded
-
-

2. Lecture 2

+
+

2. Lecture 2

-
-

2.1. Asymptotic Notations

+
+

2.1. Asymptotic Notations

-
-

2.1.1. Omega Notation [ \(\Omega\) ]

+
+

2.1.1. Omega Notation [ \(\Omega\) ]

• It is used to show the lower bound of the algorithm.
  • @@ -315,8 +329,8 @@ Since algorithms are finite, they have bounded time taken and bounded
-
-

2.1.2. Theta Notation [ \(\theta\) ]

+
+

2.1.2. Theta Notation [ \(\theta\) ]

• It is used to provide the asymptotically tight (equal) bound.
  • @@ -330,8 +344,8 @@ Since algorithms are finite, they have bounded time taken and bounded
-
-

2.1.3. Little-Oh Notation [o]

+
+

2.1.3. Little-Oh Notation [o]

  • The little o notation defines the strict upper bound of an algorithm.
  • @@ -341,8 +355,8 @@ Since algorithms are finite, they have bounded time taken and bounded
-
-

2.1.4. Little-Omega Notation [ \(\omega\) ]

+
+

2.1.4. Little-Omega Notation [ \(\omega\) ]

  • The little omega notation defines the strict lower bound of an algorithm.
  • @@ -353,12 +367,12 @@ Since algorithms are finite, they have bounded time taken and bounded
-
-

2.2. Comparing Growth rate of funtions

+
+

2.2. Comparing Growth rate of functions

-
-

2.2.1. Applying limit

+
+

2.2.1. Applying limit

To compare two functions \(f(n)\) and \(g(n)\), we can use the limit @@ -370,13 +384,13 @@

• If the result is a finite non-zero constant, then growth of \(g(n)\) = growth of \(f(n)\)
  • -Note : L'Hôpital's rule can be used in this limit. +Note : L'Hôpital's rule can be used in this limit.
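As a quick worked example (added here for illustration): to compare \(f(n) = n\) and \(g(n) = n^2\), take the limit of their ratio, \[ \lim_{n \to \infty} \frac{n}{n^2} = \lim_{n \to \infty} \frac{1}{n} = 0 \] so \(n\) grows strictly slower than \(n^2\).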

    -
    -

    2.2.2. Using logarithm

    +
    +

    2.2.2. Using logarithm

Using logarithms can be useful to compare exponential functions. When comparing functions \(f(n)\) and \(g(n)\), @@ -390,8 +404,8 @@ Using logarithm can be useful to compare exponential functions. When comaparing
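For instance (an added example): to compare \(f(n) = 2^n\) and \(g(n) = 3^n\), take logarithms, \(log(f(n)) = n.log(2)\) and \(log(g(n)) = n.log(3)\). Since \(log(2) < log(3)\), \(2^n\) grows strictly slower than \(3^n\), even though both are exponential.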

    -
    -

    2.2.3. Common funtions

    +
    +

2.2.3. Common functions

Commonly seen growth rates, in increasing order, are @@ -403,12 +417,12 @@ Where \(c\) is any constant.

    -
    -

    2.3. Properties of Asymptotic Notations

    +
    +

    2.3. Properties of Asymptotic Notations

    -
    -

    2.3.1. Big-Oh

    +
    +

    2.3.1. Big-Oh

    • Product : \[ Given\ f_1 = O(g_1)\ \ and\ f_2 = O(g_2) \implies f_1 f_2 = O(g_1 g_2) \] \[ Also\ f.O(g) = O(f g) \]
    • @@ -420,11 +434,11 @@ Where \(c\) is any constant.
    -
    -

    2.3.2. Properties

    +
    +

    2.3.2. Properties

    -
    +

    asymptotic-notations-properties.png

    @@ -441,12 +455,12 @@ Where \(c\) is any constant.
    -
    -

    3. Lecture 3

    +
    +

    3. Lecture 3

    -
    -

    3.1. Calculating time complexity of algorithm

    +
    +

    3.1. Calculating time complexity of algorithm

    We will look at three types of situations @@ -458,8 +472,8 @@ We will look at three types of situations

    -
    -

    3.1.1. Sequential instructions

    +
    +

    3.1.1. Sequential instructions

A sequential set of instructions is a sequence of instructions without iteration or recursion. It is a simple block of instructions with no branches. A sequential set of instructions has a time complexity of O(1), i.e., constant time complexity. @@ -467,8 +481,8 @@
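For illustration (an added sketch, not from the original notes), a short C block of purely sequential instructions; it does the same constant amount of work regardless of the input values:

/* A sequential block: no loops, no recursion, so the work is constant, O(1). */
int sum_of_three(int a, int b, int c){
  int sum = a + b;   /* one addition */
  sum = sum + c;     /* one more addition */
  return sum;        /* a constant number of steps overall */
}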

    -
    -

    3.1.2. Iterative instructions

    +
    +

    3.1.2. Iterative instructions

A set of instructions in a loop. Iterative instructions can have different complexities based on how many iterations occur, depending on the input size. @@ -489,15 +503,16 @@

• If the inner loop iterator doesn't depend on the outer loop, the complexity of the inner loop is multiplied by the number of times the outer loop runs to get the time complexity. For example, suppose we have a loop as
  • -
    -for(int i = 0; i < n; i++){
    +
    +
    for(int i = 0; i < n; i++){
       ...
    -  for(int j = 0; j < n; j *= 2){
    +  for(int j = 0; j < n; j *= 2){
         ...
       }
       ...
     }
     
    +

Here, the outer loop will run n times and the inner loop will run log(n) times. Therefore, the total number of times the statements in the inner loop run is n.log(n). @@ -508,15 +523,16 @@ Thus the time complexity is O(n.log(n)).

  • If inner loop and outer loop are related, then complexities have to be computed using sums. Example, we have loop
  • -
    -for(int i = 0; i <= n; i++){
    +
    +
    for(int i = 0; i <= n; i++){
       ...
    -  for(int j = 0; j <= i; j++){
    +  for(int j = 0; j <= i; j++){
         ...
       }
       ...
     }
     
    +

Here the outer loop will run n times, with i going from 0 to n. The number of times the inner loop runs depends on i. @@ -587,15 +603,16 @@ total number of times inner loop runs = \(\frac{n^2}{2} + \frac{n}{2}\)
    Suppose we have loop

    -
    -for(int i = 1; i <= n; i++){
    +
    +
    for(int i = 1; i <= n; i++){
       ...
    -  for(int j = 1; j <= i; j *= 2){
    +  for(int j = 1; j <= i; j *= 2){
         ...
       }
       ...
     }
     
    +

    The outer loop will run n times with i from 1 to n, and inner will run log(i) times. @@ -669,21 +686,22 @@ Time complexity = \(O(n.log(n))\)

    -
    -

    3.1.3. An example for time complexities of nested loops

    +
    +

    3.1.3. An example for time complexities of nested loops

    Suppose a loop,

    -
    -for(int i = 1; i <= n; i *= 2){
    +
    +
    for(int i = 1; i <= n; i *= 2){
       ...
    -  for(int j = 1; j <= i; j *= 2){
    +  for(int j = 1; j <= i; j *= 2){
         ...
       }
       ...
     }
     
    +

Here, the outer loop will run log(n) times. Let's say that for some given n it runs k times, i.e., let \[ k = log(n) \] @@ -763,20 +781,20 @@ Putting value \(k = log(n)\)

    -
    -

    4. Lecture 4

    +
    +

    4. Lecture 4

    -
    -

    4.1. Time complexity of recursive instructions

    +
    +

    4.1. Time complexity of recursive instructions

To get the time complexity of recursive functions/calls, we first express the time complexity itself in a recursive manner.

    -
    -

    4.1.1. Time complexity in recursive form

    +
    +

    4.1.1. Time complexity in recursive form

We first have to create a way to describe the time complexity of recursive functions in the form of an equation, as follows. @@ -853,12 +871,12 @@ Here, the recursive calls are func(n-1) and func(n-2), therefore time complexiti
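As an added sketch of the idea (the name func mirrors the example referenced in these notes; the body is illustrative):

/* Two recursive calls, so the time complexity is written recursively as
   T(n) = T(n-1) + T(n-2) + 1, where the 1 stands for the constant non-recursive work. */
int func(int n){
  if(n <= 1)
    return 1;                      /* base case: constant time */
  return func(n-1) + func(n-2);    /* recursive calls on n-1 and n-2 */
}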

    -
    -

    4.2. Solving Recursive time complexities

    +
    +

    4.2. Solving Recursive time complexities

    -
    -

    4.2.1. Iterative method

    +
    +

    4.2.1. Iterative method

    • Take for example,
    • @@ -939,8 +957,8 @@ Time complexity is

    -
    -

    4.2.2. Master Theorem for Subtract recurrences

    +
    +

    4.2.2. Master Theorem for Subtract recurrences

    For recurrence relation of type @@ -970,8 +988,8 @@ Since a > 1, \(T(n) = O(n^2 . 3^n)\)

    -
    -

    4.2.3. Master Theorem for divide and conquer recurrences

    +
    +

    4.2.3. Master Theorem for divide and conquer recurrences

    \[ T(n) = aT(n/b) + f(n).(log(n))^k \] @@ -1027,12 +1045,12 @@ So time complexity is,
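A quick worked example (added for illustration): for \( T(n) = 2T(n/2) + n \) we have \(a = 2\), \(b = 2\), \(f(n) = n\) and \(k = 0\); since \(n^{log_b a} = n^{log_2 2} = n\) matches \(f(n)\), the theorem gives \( T(n) = \theta (n.log(n)) \). Similarly, \( T(n) = T(n/2) + 1 \) gives \( T(n) = \theta (log(n)) \).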

    -
    -

    4.3. Square root recurrence relations

    +
    +

    4.3. Square root recurrence relations

    -
    -

    4.3.1. Iterative method

    +
    +

    4.3.1. Iterative method

    Example, @@ -1074,8 +1092,8 @@ Time complexity is,

    -
    -

    4.3.2. Master Theorem for square root recurrence relations

    +
    +

    4.3.2. Master Theorem for square root recurrence relations

For recurrence relations with a square root, we first need to convert the recurrence relation into a form on which we can use the master theorem. Example, @@ -1131,16 +1149,16 @@ Putting value of m,

    -
    -

    5. Lecture 5

    +
    +

    5. Lecture 5

    -
    -

    5.1. Extended Master's theorem for time complexity of recursive algorithms

    +
    +

    5.1. Extended Master's theorem for time complexity of recursive algorithms

    -
    -

    5.1.1. For (k = -1)

    +
    +

    5.1.1. For (k = -1)

    \[ T(n) = aT(n/b) + f(n).(log(n))^{-1} \] @@ -1156,8 +1174,8 @@ Putting value of m,

    -
    -

    5.1.2. For (k < -1)

    +
    +

    5.1.2. For (k < -1)

    \[ T(n) = aT(n/b) + f(n).(log(n))^{k} \] @@ -1174,8 +1192,8 @@ Putting value of m,

    -
    -

    5.2. Tree method for time complexity of recursive algorithms

    +
    +

    5.2. Tree method for time complexity of recursive algorithms

The tree method is used when there are multiple recursive calls in our recurrence relation. Example, @@ -1304,8 +1322,8 @@ Of the two possible time complexities, we consider the one with higher growth ra

    -
    -

    5.2.1. Avoiding tree method

    +
    +

    5.2.1. Avoiding tree method

The tree method, as mentioned, is mainly used when we have multiple recursive calls with different factors. But when using the big-oh notation (O), we can avoid the tree method in favour of the master's theorem by converting the recursive call with the smaller factor into the one with the larger factor. This works since big-oh gives an upper bound (worst case). Let's take our previous example @@ -1324,8 +1342,8 @@ Now, our recurrance relation is in a form where we can apply the mater's theorem

    -
    -

    5.3. Space complexity

    +
    +

    5.3. Space complexity

    The amount of memory used by the algorithm to execute and produce the result for a given input size is space complexity. Similar to time complexity, when comparing two algorithms space complexity is usually represented as the growth rate of memory used with respect to input size. The space complexity includes @@ -1340,8 +1358,8 @@ The amount of memory used by the algorithm to execute and produce the result for

    -
    -

    5.3.1. Auxiliary space complexity

    +
    +

    5.3.1. Auxiliary space complexity

The space complexity when we disregard the input space is the auxiliary space complexity; we basically treat the algorithm as if its input space were zero. Auxiliary space complexity is more useful when comparing algorithms, because algorithms working towards the same result will have the same input space. For example, the sorting algorithms all have the input space of the list, so it is not a metric we can use to compare algorithms. So from here on, when we calculate space complexity, we are trying to calculate auxiliary space complexity, and we sometimes just refer to it as space complexity. @@ -1350,8 +1368,8 @@

    -
    -

    5.4. Calculating auxiliary space complexity

    +
    +

    5.4. Calculating auxiliary space complexity

    There are two parameters that affect space complexity, @@ -1362,8 +1380,8 @@ There are two parameters that affect space complexity,

    -
    -

    5.4.1. Data Space used

    +
    +

    5.4.1. Data Space used

    The data space used by the algorithm depends on what data structures it uses to solve the problem. Example, @@ -1418,8 +1436,8 @@ Here, we create a matrix of size n*n, so the increase in allocated space

    -
    -

    5.4.2. Code Execution space in recursive algorithm

    +
    +

    5.4.2. Code Execution space in recursive algorithm

When we use recursion, the function calls are stored on the stack. This means that the code execution space will increase. A single function call takes a fixed (constant) amount of space in memory. So to get the space complexity, we need to know how many function calls occur in the longest branch of the function call tree. @@ -1510,12 +1528,12 @@ Number of levels is \(log_2n\). Therefore, space complexity is \(\theta (log_

    -
    -

    6. Lecture 6

    +
    +

    6. Lecture 6

    -
    -

    6.1. Divide and Conquer algorithms

    +
    +

    6.1. Divide and Conquer algorithms

Divide and conquer is a problem solving strategy. In divide and conquer algorithms, we solve a problem recursively by applying three steps: @@ -1538,12 +1556,12 @@

    -
    -

    6.2. Searching for element in array

    +
    +

    6.2. Searching for element in array

    -
    -

    6.2.1. Straight forward approach for searching (Linear Search)

    +
    +

    6.2.1. Straight forward approach for searching (Linear Search)

    int linear_search(int *array, int n, int x){
    @@ -1563,14 +1581,15 @@ Recursive approach
     

    -
    # call this function with index = 0
    -def linear_search(array, item, index):
    -    if len(array) < 1:
    -        return -1
    -    elif array[index] == item:
    -        return index
    -    else:
    -        return linear_search(array, item, index + 1)
    +
// call this function with index = 0; n is the number of elements in array
+int linear_search(int array[], int n, int item, int index){
+  if( index >= n )
+    return -1;
+  else if (array[index] == item)
+    return index;
+  else
+    return linear_search(array, n, item, index + 1);
+}
     
    @@ -1665,8 +1684,8 @@ Recursive approach
    -
    -

    6.2.2. Divide and conquer approach (Binary search)

    +
    +

    6.2.2. Divide and conquer approach (Binary search)

    The binary search algorithm works on an array which is sorted. In this algorithm we: @@ -1690,7 +1709,7 @@ Suppose binarySearch(array, left, right, key), left and right are indicies of le -
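For illustration (not part of the original patch), a minimal iterative C sketch of the same idea; the notes' own recursive version appears further below, and the name binarySearch simply mirrors the description above:

int binarySearch(int array[], int left, int right, int key){
  while(left <= right){
    int mid = (left + right) / 2;   /* middle index of the current range */
    if(array[mid] == key)
      return mid;                   /* found the key */
    else if(array[mid] < key)
      left = mid + 1;               /* the key can only be in the right half */
    else
      right = mid - 1;              /* the key can only be in the left half */
  }
  return -1;                        /* the key is not present */
}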

    +

    binary-search.jpg

    @@ -1761,23 +1780,28 @@ Recursive approach:
    -
    -

    6.3. Max and Min element from array

    +
    +

    6.3. Max and Min element from array

    -
    -

    6.3.1. Straightforward approach

    +
    +

    6.3.1. Straightforward approach

    -
    def min_max(a):
    -    max = min = a[1]
    -    for i in range(2, n):
    -        if a[i] > max:
    -            max = a[i];
    -        elif a[i] < min:
    -            min = a[i];
    -
    -    return (min,max)
    +
struct min_max {int min; int max;};
+struct min_max min_max(int array[], int n){
+  int max = array[0];
+  int min = array[0];
+
+  for(int i = 1; i < n; i++){
+    if(array[i] > max)
+      max = array[i];
+    else if(array[i] < min)
+      min = array[i];
+  }
+
+  return (struct min_max) {min, max};
+}
     
    @@ -1789,8 +1813,8 @@ Recursive approach:
    -
    -

    6.3.2. Divide and conquer approach

    +
    +

    6.3.2. Divide and conquer approach

Suppose the function is MinMax(array, left, right) which will return a tuple (min, max). We divide the array in the middle, mid = (left + right) / 2. The left part will be array[left:mid] and the right part will be array[mid+1:right]. @@ -1869,8 +1893,8 @@ If n is not a power of 2, we will round the number of comparision up.
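The code for this recursion is not shown in this hunk; a minimal C sketch consistent with the description (the reuse of struct min_max from the straightforward approach and the two base cases are assumptions):

struct min_max MinMax(int array[], int left, int right){
  struct min_max result, l, r;
  if(left == right){                        /* one element: it is both min and max */
    result.min = result.max = array[left];
    return result;
  }
  if(right == left + 1){                    /* two elements: one comparison */
    if(array[left] < array[right]){ result.min = array[left];  result.max = array[right]; }
    else                           { result.min = array[right]; result.max = array[left];  }
    return result;
  }
  int mid = (left + right) / 2;
  l = MinMax(array, left, mid);             /* solve the left half */
  r = MinMax(array, mid + 1, right);        /* solve the right half */
  result.min = (l.min < r.min) ? l.min : r.min;   /* combine: one comparison for min */
  result.max = (l.max > r.max) ? l.max : r.max;   /* combine: one comparison for max */
  return result;
}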

    -
    -

    6.3.3. Efficient single loop approach (Increment by 2)

    +
    +

    6.3.3. Efficient single loop approach (Increment by 2)

In this algorithm we compare pairs of numbers from the array. It works on the idea that only the larger number of a pair can be the new maximum and only the smaller one can be the new minimum. So after comparing the pair, we only need to test the bigger of the two against the current maximum and the smaller of the two against the current minimum. This brings the number of comparisons needed to process two numbers of the array from 4 (when we increment by 1) down to 3 (when we increment by 2). @@ -1917,12 +1941,12 @@
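A C sketch of this pairwise idea (added for illustration; the function name and the reuse of struct min_max are assumptions):

struct min_max min_max_by_pairs(int array[], int n){
  struct min_max result;
  int i;
  if(n % 2 == 0){                            /* even count: initialise from the first pair */
    if(array[0] < array[1]){ result.min = array[0]; result.max = array[1]; }
    else                    { result.min = array[1]; result.max = array[0]; }
    i = 2;
  } else {                                   /* odd count: initialise from the first element */
    result.min = result.max = array[0];
    i = 1;
  }
  for(; i + 1 < n; i += 2){                  /* process the rest two elements at a time */
    int small, big;
    if(array[i] < array[i+1]){ small = array[i];   big = array[i+1]; }   /* 1st comparison */
    else                     { small = array[i+1]; big = array[i];   }
    if(big > result.max)   result.max = big;                             /* 2nd comparison */
    if(small < result.min) result.min = small;                           /* 3rd comparison */
  }
  return result;
}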

    -
    -

    7. Lecture 7

    +
    +

    7. Lecture 7

    -
    -

    7.1. Square matrix multiplication

    +
    +

    7.1. Square matrix multiplication

    Matrix multiplication algorithms taken from here: @@ -1930,8 +1954,8 @@ Matrix multiplication algorithms taken from here:

    -
    -

    7.1.1. Straight forward method

    +
    +

    7.1.1. Straight forward method

    /* This will calculate A X B and store it in C. */
    @@ -1970,8 +1994,8 @@ Time complexity is \(O(n^3)\)
     
    -
    -

    7.1.2. Divide and conquer approach

    +
    +

    7.1.2. Divide and conquer approach

    The divide and conquer algorithm only works for a square matrix whose size is n X n, where n is a power of 2. The algorithm works as follows. @@ -2012,8 +2036,8 @@ Using the master's theorem

    -
    -

    7.1.3. Strassen's algorithm

    +
    +

    7.1.3. Strassen's algorithm

Another, more efficient divide and conquer algorithm for matrix multiplication. This algorithm also only works on square matrices with n being a power of 2. It is based on the observation that, for A X B = C, we can calculate C11, C12, C21 and C22 as, @@ -2081,12 +2105,12 @@ Using the master's theorem

    -
    -

    7.2. Sorting algorithms

    +
    +

    7.2. Sorting algorithms

    -
    -

    7.2.1. In place vs out place sorting algorithm

    +
    +

    7.2.1. In place vs out place sorting algorithm

If the space complexity of a sorting algorithm is \(\theta (1)\), then the algorithm is called an in-place sorting algorithm; otherwise it is called an out-of-place sorting algorithm. @@ -2094,8 +2118,8 @@

    -
    -

    7.2.2. Bubble sort

    +
    +

    7.2.2. Bubble sort

It is the simplest sorting algorithm and is easy to implement, so it is useful when the number of elements to sort is small. It is an in-place sorting algorithm. We compare adjacent pairs of elements from the array and swap them into the correct order. Suppose the input has n elements. @@ -2177,12 +2201,12 @@ Recursive time complexity : \(T(n) = T(n-1) + n - 1\)
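The bubble sort code itself is unchanged by this patch and so is not shown in the hunk; for reference, a minimal sketch matching the description (names are illustrative):

void bubble_sort(int array[], int n){
  for(int i = 0; i < n - 1; i++){            /* after pass i, the largest i+1 elements are in place */
    for(int j = 0; j < n - 1 - i; j++){
      if(array[j] > array[j+1]){             /* adjacent pair out of order */
        int temp = array[j];                 /* swap them */
        array[j] = array[j+1];
        array[j+1] = temp;
      }
    }
  }
}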

    -
    -

    8. Lecture 8

    +
    +

    8. Lecture 8

    -
    -

    8.1. Selection sort

    +
    +

    8.1. Selection sort

It is an in-place sorting technique. In this algorithm, we get the minimum element from the array and swap it into the first position. Next we get the minimum from array[1:] and place it at index 1. Similarly, we get the minimum from array[2:] and place it at index 2. We continue until we get the minimum from array[len(array) - 2:] and place it at index len(array) - 2. @@ -2202,7 +2226,11 @@ }
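The selection sort code is only partially visible in this hunk; a minimal sketch consistent with the description (names are illustrative):

void selection_sort(int array[], int n){
  for(int i = 0; i < n - 1; i++){
    int min_index = i;                       /* find the minimum of array[i:] */
    for(int j = i + 1; j < n; j++){
      if(array[j] < array[min_index])
        min_index = j;
    }
    int temp = array[i];                     /* place that minimum at index i */
    array[i] = array[min_index];
    array[min_index] = temp;
  }
}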

    +
    +
    +

    8.1.1. Time complexity

    +

The total number of comparisons is, \[ \text{Total number of comparisons} = (n -1) + (n-2) + (n-3) + ... + (1) \] \[ \text{Total number of comparisons} = \frac{n(n-1)}{2} \] @@ -2216,9 +2244,10 @@ Therefore the time complexity in all cases is, \[ \text{Time complexity} = \thet

    +
    -
    -

    8.2. Insertion sort

    +
    +

    8.2. Insertion sort

    It is an inplace sorting algorithm. @@ -2248,11 +2277,11 @@ It is an inplace sorting algorithm. }

    +
    -
      -
    • Time complexity
    • -
    - +
    +

    8.2.1. Time complexity

    +

Best Case : The best case is when the input array is already sorted. In this case, we do (n-1) comparisons and no swaps. The time complexity will be \(\theta (n)\)
    @@ -2272,9 +2301,10 @@ Total time complexity becomes \(\theta \left( 2 \frac{n(n-1)}{2} \right)\), whic

    +
    -
    -

    8.3. Inversion in array

    +
    +

    8.3. Inversion in array

The inversion count of an array is a measure of how far the array is from being sorted. @@ -2388,8 +2418,8 @@ Total number of inversions = 1 + 2 = 3
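For illustration (an added sketch, not in the original notes), a direct C function that counts inversions, i.e. pairs of indices i < j with array[i] > array[j]:

int count_inversions(int array[], int n){
  int count = 0;
  for(int i = 0; i < n; i++){
    for(int j = i + 1; j < n; j++){
      if(array[i] > array[j])     /* this pair is out of order */
        count++;
    }
  }
  return count;
}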

    -
    -

    8.3.1. Relation between time complexity of insertion sort and inversion

    +
    +

    8.3.1. Relation between time complexity of insertion sort and inversion

If the number of inversions of an array is f(n), then the time complexity of insertion sort will be \(\theta (n + f(n))\). @@ -2397,6 +2427,88 @@

    + +
    +

    8.4. Quick sort

    +
    +

+It is a divide and conquer technique. It uses a partition algorithm which chooses an element from the array, then places all smaller elements to its left and all larger elements to its right. We can then take these two parts of the array and recursively place all elements in their correct positions. For simplicity, the element chosen by the partition algorithm is either the leftmost or the rightmost element. +

    + +
    +
    void quick_sort(int array[], int low, int high){
    +  if(low < high){
    +    int x = partition(array, low, high);
    +    quick_sort(array, low, x-1);
    +    quick_sort(array, x+1, high);
    +  }
    +}
    +
    +
    + +

    +As we can see, the main component of this algorithm is the partition algorithm. +

    +
    + +
    +

    8.4.1. Lomuto partition

    +
    +

    +The partition algorithm will work as follows: +

    + +
    +
    /* Will return the index where the array is partitioned */
    +int partition(int array[], int low, int high){
    +  int pivot = array[high];
+  /* i tracks the last position holding an element less than or equal to the pivot */
+  int i = low - 1;
+
+  for(int j = low; j < high; j++){
+    if(array[j] <= pivot){
+      i += 1;
+      /* swap array[i] and array[j] */
+      int temp = array[i];
+      array[i] = array[j];
+      array[j] = temp;
+    }
+  }
+
+  /* move the pivot right after the last smaller-or-equal element */
+  int temp = array[i+1];
+  array[i+1] = array[high];
+  array[high] = temp;
    +  return (i + 1);
    +}
    +
    +
    + +
      +
    • Time complexity
    • +
    +

+For an array of size n, the number of comparisons done by this algorithm is always n - 1. Therefore, the time complexity of this partition algorithm is, +\[ T(n) = \theta (n) \] +

    +
    +
    + +
    +

    8.4.2. Time complexity of quicksort

    +
    +
      +
    • Best Case : The partition algorithm always divides the array to two equal parts. In this case, the recursive relation becomes +\[ T(n) = 2T(n/2) + \theta (n) \] +Where, \(\theta (n)\) is the time complexity for creating partition. +
      +Using the master's theorem. +\[ T(n) = \theta( n.log(n) ) \]
    • + +
    • Worst Case : The partition algorithm always creates the partition at one of the extreme positions of the array. This creates a single partition with n-1 elements. Therefore, the quicksort algorithm has to be called on the remaining n-1 elements of the array. +\[ T(n) = T(n-1) + \theta (n) \] +Again, \(\theta (n)\) is the time complexity for creating partition. +
      +Using master's theorem +\[ T(n) = \theta (n^2) \]
    • +
    +
    +
    +