author    Julian T <julian@jtle.dk>  2021-06-04 13:00:07 +0200
committer Julian T <julian@jtle.dk>  2021-06-04 13:00:07 +0200
commit    802c3d64d2402c5bf060fb5488bd10688d2a6965 (patch)
tree      5556ab35b73819531103f78579da7abffefa016d
parent    703d1962bd5128e0067f49f3889d76e080ece860 (diff)
Add more changes to dig and prob
-rwxr-xr-x  render.py                      1
-rw-r--r--  sem6/dig/m5/ex2.vhdl           2
-rw-r--r--  sem6/dig/mpc2/opgaver.tex     80
-rw-r--r--  sem6/prob/eksamnen/notes.tex  47
-rw-r--r--  sem6/prob/m2/noter.tex        48
-rw-r--r--  sem6/prob/m2/opgaver.md       30
-rw-r--r--  sem6/prob/m3/noter.md         40
7 files changed, 134 insertions, 114 deletions
diff --git a/render.py b/render.py
index c1ff9fb..78456b6 100755
--- a/render.py
+++ b/render.py
@@ -14,6 +14,7 @@ tex_template = """\\documentclass[12pt]{article}
\\usepackage{float}
\\usepackage{amsthm}
\\usepackage{booktabs}
+\\usepackage{siunitx}
\\usepackage{tikz}
\\usetikzlibrary{automata, positioning, arrows}
diff --git a/sem6/dig/m5/ex2.vhdl b/sem6/dig/m5/ex2.vhdl
index bed016a..86f22a9 100644
--- a/sem6/dig/m5/ex2.vhdl
+++ b/sem6/dig/m5/ex2.vhdl
@@ -17,7 +17,7 @@ architecture impl of ex2 is
begin
output_int <= std_logic_vector(value);
output <= output_int;
- leds <= output_int(7 downto 0);
+ leds <= output_int(23 downto 16);
process (clk)
begin
diff --git a/sem6/dig/mpc2/opgaver.tex b/sem6/dig/mpc2/opgaver.tex
index 6d0826e..74aba7d 100644
--- a/sem6/dig/mpc2/opgaver.tex
+++ b/sem6/dig/mpc2/opgaver.tex
@@ -1,8 +1,28 @@
\title{Opgaver til Microprocessors 2}
\date{2021-03-24}
+Found out that I have done the wrong exercises \texttt{:-(}.
+
\section{Problem 4.1}
+\begin{opg}
+    What are the four steps CPUs use to execute instructions?
+\end{opg}
+
+\paragraph{Fetch} comes first, where the instruction is fetched from memory.
+It is read from the address the instruction pointer points to.
+
+\paragraph{Decode} the instruction, which on most CPUs expands into multiple microcode operations.
+
+Whether to \textbf{Access memory} is determined in the decode step.
+If it is required, the operands must be read from memory.
+
+\paragraph{Execute} the instruction using the fetched memory and register values.
+
+\paragraph{Repeat} from the beginning with a new fetch.
+
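+A toy fetch/decode/execute loop written out in Python makes the steps concrete.
+This is only a sketch; the two-operand instruction set below is invented for
+illustration and is not Mic-1 or any real ISA.
+
+\begin{verbatim}
+# Toy fetch-decode-execute loop; the instruction set is invented for illustration.
+def run(program, memory):
+    acc, ip = 0, 0                     # accumulator and instruction pointer
+    while ip < len(program):
+        opcode, operand = program[ip]  # 1. fetch the instruction ip points at
+        ip += 1
+        if opcode == "LOAD":           # 2. decode the opcode ...
+            acc = memory[operand]      # 3. ... and access memory if needed
+        elif opcode == "ADD":
+            acc += memory[operand]     # 4. execute
+        elif opcode == "STORE":
+            memory[operand] = acc
+        # 5. repeat with a new fetch
+    return memory
+
+# i = j + k with j at address 0, k at address 1, i at address 2
+print(run([("LOAD", 0), ("ADD", 1), ("STORE", 2)], [2, 3, 0]))  # -> [2, 3, 5]
+\end{verbatim}
+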
+\section{Problem 4.2}
+
\emph{In Fig. 4-6, the B bus register is encoded in a 4-bit field, but the C bus is represented
as a bit map. Why?}
@@ -11,13 +31,13 @@ Therefore one cannot take the shortcut with a 4-bit field, as that would only al
One cannot represent registers 1 and 2 at the same time in a 4-bit field, as that would instead select register 3.
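+
+A small worked illustration (the register numbers are chosen just for the example):
+with a 4-bit binary field the pattern 0011 can only mean register 3, whereas a bit
+map has one bit per register, so registers 1 and 2 can be enabled at once.
+
+\begin{verbatim}
+  B bus, 4-bit field:  0011  -> can only select one source, here register 3
+  C bus, bit map:      0110  -> one bit per register: registers 1 and 2 both latched
+\end{verbatim}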
-\section{Problem 4.5}
+\section{Problem 4.4}
{\itshape
Suppose that in the example of Fig. 4-14(a) the statement
- \begin{verbatim}
- k = 5;
- \end{verbatim}
+\begin{verbatim}
+ k = 5;
+\end{verbatim}
is added after the if statement. What would the new assembly code be? Assume that
the compiler is an optimizing compiler.
}
@@ -38,13 +58,46 @@ Well k is set either way, so one can invert the if.
ISTORE k
\end{verbatim}
+\section{Problem 4.4 Moodle}
+
+\begin{opg}
+    Give two different IJVM translations for the following Java statement:
+\begin{verbatim}
+ i = j + m + 8;
+\end{verbatim}
+\end{opg}
+
+This can be done by loading $j$ and $m$ onto the stack and adding them.
+Then one can push 8 and add it.
+Finally the result is stored in $i$.
+
+\begin{verbatim}
+ ILOAD j
+ ILOAD m
+ IADD
+ BIPUSH 8
+ IADD
+ ISTORE i
+\end{verbatim}
+
+Another way is to push everything first and then add several times in a row.
+
+\begin{verbatim}
+ ILOAD j
+ ILOAD m
+ BIPUSH 8
+ IADD
+ IADD
+ ISTORE i
+\end{verbatim}
+
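+As a sanity check, here is a sketch of the operand stack for the second translation,
+assuming $j = 2$ and $m = 3$ (top of the stack to the right):
+
+\begin{verbatim}
+  ILOAD j    stack: 2
+  ILOAD m    stack: 2 3
+  BIPUSH 8   stack: 2 3 8
+  IADD       stack: 2 11
+  IADD       stack: 13
+  ISTORE i   stack: (empty), i = 13
+\end{verbatim}
+
+Both translations store $i = j + m + 8$; they differ only in the order of the
+additions and in how many operands they keep on the stack at once (2 versus 3).
+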
\section{Problem 4.9}
{\itshape
How long does a 2.5-GHz Mic-1 take to execute the Java statement
- \begin{verbatim}
- i = j + k
- \end{verbatim}
+\begin{verbatim}
+ i = j + k;
+\end{verbatim}
Give your answer in nanoseconds
}
@@ -59,13 +112,12 @@ First we "compile" the java statement :-).
Then we can add up how many microinstructions each one takes (\textbf{bold} number), multiplied by how many times it is used.
-\begin{equation}
- \underbrace{\mathbf 1 \cdot 4}_{MAIN} + \underbrace{\mathbf 5 \cdot 2}_{ILOAD} + \underbrace{\mathbf 3}_{IADD} + \underbrace{\mathbf 6}_{ISTORE} = 23
-\end{equation}
+\[
+\underbrace{\mathbf 1 \cdot 4}_{\mathrm{MAIN}} + \underbrace{\mathbf 5 \cdot 2}_{\mathrm{ILOAD}} + \underbrace{\mathbf 3}_{\mathrm{IADD}} + \underbrace{\mathbf 6}_{\mathrm{ISTORE}} = 23
+\]
Then we can multiply by the time a single microinstruction takes
-\begin{equation}
- \frac 1 {2.5 \cdot 10^9} \cdot 23 = 9.2 \cdot 10^{-9}\,,
-\end{equation}
+\[
+ \frac 1 {\SI{2.5e9}{Hz}} \cdot 23 = \SI{9.2e-9}{s}\,,
+\]
which is 9.2 nanoseconds.
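+
+The arithmetic can be double-checked with a couple of lines of Python (a quick
+sanity check, not part of the exercise):
+
+\begin{verbatim}
+clock_hz = 2.5e9
+micro_ops = 1 * 4 + 5 * 2 + 3 + 6       # MAIN, 2x ILOAD, IADD, ISTORE
+print(micro_ops, micro_ops / clock_hz)  # 23 9.2e-09 (seconds)
+\end{verbatim}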
-
diff --git a/sem6/prob/eksamnen/notes.tex b/sem6/prob/eksamnen/notes.tex
deleted file mode 100644
index 4dfee30..0000000
--- a/sem6/prob/eksamnen/notes.tex
+++ /dev/null
@@ -1,47 +0,0 @@
-\title{Eksamnens Noter}
-
-
-The universal set or sample space is the set of everything, and is denoted $S$.
-Therefore the probability of hitting $S$ is $P(S) = 1$.
-
-This is the first of 3 axioms repeated below.
-
-\begin{enumerate}
- \item For any event $A$, $P(A) \geq 0$.
- \item The probability of hitting sample space is always 1, $P(S) = 1$.
-    \item If events $A_1, A_2, ...$ are \textbf{disjoint} events, then
- \begin{equation}
- P(A_1 \cup A_2 ...) = P(A_1) + P(A_2)\,.
- \end{equation}
-\end{enumerate}
-
-The last axiom requires that the events $A_n$ are disjoint.
-If they aren't one should subtract the part they have in common.
-This is called the \emph{Inclusion-Exclusion Principle}.
-
-\begin{principle}
- The \emph{Inclusion-Exclusion Principle} is defined as
- \begin{equation}
- P(A \cup B) = P(A) + P(B) - P(A \cap B)\,.
- \end{equation}
-    The definition for 3 events can be found in the book.
-\end{principle}
-
-\section{Counting}
-
-The probability of an event $A$ can be found by
-\begin{equation}
- P(A) = \frac {|A|} {|S|}\,.
-\end{equation}
-It is therefore required to count how many elements are in $S$ and $A$.
-The most simple method is the \emph{multiplication principle}.
-
-\begin{principle}[Multiplication principle]
- Let there be $r$ random experiments, where the $k$'th experiment has $n_k$ outcomes.
- Then there are
- \begin{equation}
- n_1 \cdot n_2 \cdot ... \cdot n_r
- \end{equation}
- possible outcomes over all $r$ experiments.
-\end{principle}
-
diff --git a/sem6/prob/m2/noter.tex b/sem6/prob/m2/noter.tex
index 3eb2e4f..c35f52b 100644
--- a/sem6/prob/m2/noter.tex
+++ b/sem6/prob/m2/noter.tex
@@ -1,6 +1,5 @@
\title{Noter til probability m2}
-\section{Random Variables}
Here one maps from a sample space $S$ to a variable.
The variable is then written with a capital letter such as $X$.
@@ -15,7 +14,7 @@ P(X = x) = 0
$$
-\subsection{Cumulative Distribution Function}
+\section{Cumulative Distribution Function}
Here one measures the probability that the random variable is less than or equal to a certain number.
@@ -33,7 +32,7 @@ Ved discrete random variables vil denne være en slags trappe.
One can say that it is \emph{continuous from the right} since the definition uses $\leq$.
-\subsection{Probability Mass Function}
+\section{Probability Mass Function}
It works only for discrete random variables.
It is defined as the probability that $X = a$:
@@ -49,15 +48,17 @@ F(a) = \sum_{all x \leq a} p(a)
$$
-\subsection{Probability Density Function}
+\section{Probability Density Function}
Here one finds the probability in an infinitesimally small interval:
-Is the derivative of the CDF.
+In the following formulas the PDF is denoted $f$.
+\begin{equation*}
+ \begin{split}
+ F(a) = P(X \in (-\infty,a]) = \int_{-\infty}^a f(x) dx \\
+ f(a) = \frac{d}{da} F(a)
+ \end{split}
+\end{equation*}
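+
+A tiny worked example (the density is chosen only for illustration): take $f(x) = 2x$ on $[0, 1]$. Then
+\[
+    F(a) = \int_0^a 2x \, dx = a^2, \qquad F'(a) = 2a = f(a)\,.
+\]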
-$$
-F(a) = P(X \in (-\infty,a]) = \int_{-\infty}^a f(x) dx \\
-f(a) = \frac{d}{da} F(a)
-$$
The following must be true:
@@ -65,7 +66,7 @@ $$
\int_{-\infty}^{\infty} f(x) dx = 1
$$
-\subsection{Multiple Random Variables}
+\section{Multiple Random Variables}
Have multiple random variables, which may or may not be correlated.
Can define the joint CDF:
@@ -81,15 +82,28 @@ F_X(x) = P(X \leq x) = P(X \leq, Y < \infty) = F(x, \infty)
$$
One cannot go from the marginals to the joint distribution, as they do not contain enough information.
-This is only possible if X and Y are \emph{independent}.
-$$
-F_{XY}(x,y) = F_X(x) \cdot F_Y(x) \\
-p(x,y) = p_X(x) \cdot p_Y(y) \\
-f(x,y) = f_X(x) \cdot f_Y(y)
-$$
+However, if $X$ and $Y$ are \emph{independent} and $A$ and $B$ are two sets of real numbers, then:
+\[
+ P(X \in A, Y \in B) = P(X \in A) P(Y \in B)\,.
+\]
+% This is only possible if X and Y are \emph{independent}.
+% \begin{align*}
+% F_{XY}(x,y) &= F_X(x) \cdot F_Y(x) \\
+% p(x,y) &= p_X(x) \cdot p_Y(y) \\
+% f(x,y) &= f_X(x) \cdot f_Y(y)
+% \end{align*}
+
+\section{Conditional PDF}
+
+If $X$ and $Y$ have a joint PDF, then the conditional PDF of $X$ given that $Y=y$ is
+\[
+    f_{X|Y}(x|y) = \frac {f(x, y)} {f_Y(y)}
+\]
+
+There is an analogous definition for the PMF, not listed here.
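+
+A small worked example (the joint density is chosen only for illustration): if $f(x, y) = x + y$ on $[0, 1]^2$, then $f_Y(y) = \int_0^1 (x + y) \, dx = \frac 1 2 + y$, and therefore
+\[
+    f_{X|Y}(x|y) = \frac {x + y} {\frac 1 2 + y}, \qquad 0 \leq x \leq 1\,.
+\]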
-\subsection{Joined PMF}
+\section{Joint PMF}
$$
P_{XY}(x,y) = P(X = x, Y = y)
diff --git a/sem6/prob/m2/opgaver.md b/sem6/prob/m2/opgaver.md
index 0ce9c77..601aa86 100644
--- a/sem6/prob/m2/opgaver.md
+++ b/sem6/prob/m2/opgaver.md
@@ -5,16 +5,16 @@
One can say that the chance that a woman comes first is 50%.
$$
-P(1) = \frac 1 2
+P(1) = \frac 1 2 = 0.5
$$
-Herefter kræver det at en mand for først og en kvinde får næste.
+After that, it requires that a man gets the first spot and a woman gets the next one.
-$$
-P(2) = \frac 5 10 \cdot \frac 5 9 \\
-P(3) = \frac 5 10 \cdot \frac 4 9 \frac 5 8 \\
-P(4) = \frac 5 10 \cdot \frac 4 9 \frac 3 8 \cdot \frac 5 8
-$$
+\begin{align*}
+ P(2) &= \frac 5 {10} \cdot \frac 5 9 = 0.2778\\
+ P(3) &= \frac 5 {10} \cdot \frac 4 9 \cdot \frac 5 8 = 0.1389 \\
+ P(4) &= \frac 5 {10} \cdot \frac 4 9 \cdot \frac 3 8 \cdot \frac 5 7 = 0.0595
+\end{align*}
And so on.
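+
+A short Python sketch can compute the whole sequence; the setup of 5 men and 5
+women drawn in a uniformly random order is assumed from the fractions above.
+
+```python
+# P(k): probability that the first woman appears in position k,
+# with 5 men and 5 women drawn in a uniformly random order.
+def p_first_woman(k, men=5, women=5):
+    p = 1.0
+    total = men + women
+    for _ in range(k - 1):      # the first k-1 drawn are all men
+        p *= men / total
+        men -= 1
+        total -= 1
+    return p * women / total    # draw number k is a woman
+
+print([round(p_first_woman(k), 4) for k in range(1, 7)])
+# [0.5, 0.2778, 0.1389, 0.0595, 0.0198, 0.004]
+```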
@@ -52,22 +52,22 @@ $$
First one must find $\lambda$.
-$$
- \int_{0}^{\infty} \lambda e^{- \frac x {100}} \mathrm{dx} = 1 \\
- \left[ - \lambda 100 \cdot e^{- \frac x {100}}\right]_{0}^{\infty} = 1 \\
- \lambda \cdot 100 = 1 \\
- \lambda = \frac 1 {100}
-$$
+\begin{align*}
+ \int_{0}^{\infty} \lambda e^{- \frac x {100}} \mathrm{dx} &= 1 \\
+ \left[ - \lambda 100 \cdot e^{- \frac x {100}}\right]_{0}^{\infty} &= 1 \\
+ \lambda \cdot 100 &= 1 \\
+ \lambda &= \frac 1 {100}
+\end{align*}
Now one can insert the range from 50 to 150.
$$
- P(50 < x \leq 150) = \int_{50}^{150} f(x) \mathrm{dx} = - e^{- \frac {150} {100}} + e^{ - \frac {50} {100}} = 0.3834
+ P(50 < x \leq 150) = \int_{50}^{150} f(x) \mathrm{dx} = \left[ - 100 \cdot \frac 1 {100} \cdot e^{- \frac x {100}} \right]_{50}^{150} = -e^{- \frac{150} {100}} + e^{ - \frac {50} {100}} \approx 0.3834
$$
Then we can take the range from 0 to 100.
$$
- P(x < 100) = \int_{0}^{100} f(x) \mathrm{dx} = - e^{- \frac {100} {100}} = - \frac 1 e
+ P(x < 100) = \int_{0}^{100} f(x) \mathrm{dx} = - e^{- \frac {100} {100}} + 1 = 1 - \frac 1 e \approx 0.6321
$$
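+
+The two answers can be checked numerically with a few lines of Python (a quick
+sanity check, not part of the exercise):
+
+```python
+from math import exp
+
+lam = 1 / 100                      # the lambda found above
+
+def cdf(x):
+    return 1 - exp(-lam * x)       # CDF of the exponential distribution
+
+print(cdf(150) - cdf(50))  # approx 0.3834 = P(50 < X <= 150)
+print(cdf(100))            # approx 0.6321 = P(X < 100)
+```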
diff --git a/sem6/prob/m3/noter.md b/sem6/prob/m3/noter.md
index 23990ef..c784a6a 100644
--- a/sem6/prob/m3/noter.md
+++ b/sem6/prob/m3/noter.md
@@ -19,7 +19,7 @@ $$
E[X] = \int_{-\infty}^{\infty} x f(x) \mathrm{dx}
$$
-Can also calculate expectation distribution function:
+Can also calculate the expectation from the distribution function; however, this can only be used if the values are non-negative integers:
$$
E[X] = \sum_{k=0}^{\infty} P(X > k) \\
@@ -55,7 +55,7 @@ $$
E[Z] = \sum_{i} \sum_{j} g(x_i, y_j) \cdot p(x_i, y_j)
$$
-If discrete just use integrals instead.
+If continuous, just use integrals instead.
$$
E[Z] = \int_{-\infty}^{\infty} \int_{-\infty}^{\infty} g(x, y) \cdot f(x, y) \mathrm{dxdy}
@@ -69,27 +69,27 @@ $$
If $X$ and $Y$ are **independent** the following is true:
-$$
- E[g_1(X) \cdot g_2(Y)] = E[g_1(X)] \cdot E[g_2(Y)] \\
- E[X \cdot Y] = E[X] \cdot E[Y]
-$$
+\begin{align*}
+ E[g_1(X) \cdot g_2(Y)] &= E[g_1(X)] \cdot E[g_2(Y)] \\
+ E[X \cdot Y] &= E[X] \cdot E[Y]
+\end{align*}
## Variance
Describes the mean of the squared distance between outcomes and the overall mean.
Good way to describe the spread of the random variable.
-$$
-Var(X) = E[(X - E[X])^2] \\
-Var(X) = E[X^2] - E[X]^2
-$$
+\begin{align*}
+ Var(X) &= E[(X - E[X])^2] \\
+ Var(X) &= E[X^2] - E[X]^2
+\end{align*}
Without the power of two it would just be the mean minus the mean, which is always zero and therefore won't work.
One can define the *standard deviation* to bring the unit back from being squared.
$$
- Std(X) = \sqrt{ (Var(X)) }
+ Std(X) = \sqrt{ Var(X) }
$$
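+
+A quick worked example (a fair six-sided die, chosen just for illustration):
+
+$$
+    E[X] = 3.5, \quad E[X^2] = \frac {91} 6, \quad Var(X) = \frac {91} 6 - 3.5^2 = \frac {35} {12} \approx 2.92
+$$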
A rule for variance:
@@ -110,21 +110,21 @@ If X and Y are independent the Cov part disappears.
## Covariance
-$$
- Cov(X,Y) = E[(X - E[X]) \cdot (Y - E[Y])] \\
- Cov(X,Y) = E[XY] - E[X] \cdot E[Y]
-$$
+\begin{align*}
+ Cov(X,Y) &= E[(X - E[X]) \cdot (Y - E[Y])] \\
+ Cov(X,Y) &= E[XY] - E[X] \cdot E[Y]
+\end{align*}
Shows whether two variables vary together, can be both positive and negative.
If it is positive, $X$ and $Y$ tend to vary from their averages in the same direction.
Some rules below:
-$$
- Cov(X, X) = Var(X) \\
- Cov(a X, Y) = a Cov(X, Y) \\
- Cov(X + Y, Z) = Voc(X, Z) + Cov(Y, Z)
-$$
+\begin{align*}
+ Cov(X, X) &= Var(X) \\
+ Cov(a X, Y) &= a Cov(X, Y) \\
+ Cov(X + Y, Z) &= Cov(X, Z) + Cov(Y, Z)
+\end{align*}
If X and Y are independent, then covariance is zero (X and Y are *uncorrelated*).
However, X and Y can be uncorrelated without being independent.
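+
+A standard example of the last point (chosen for illustration): let $X$ be uniform
+on $\{-1, 0, 1\}$ and let $Y = X^2$. Then
+
+$$
+    E[XY] = E[X^3] = 0 = E[X] \cdot E[Y] \quad \Rightarrow \quad Cov(X, Y) = 0,
+$$
+
+yet $Y$ is completely determined by $X$, so the two are clearly not independent.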