|
19 | 19 | \newcommand\indep{\protect\mathpalette{\protect\independenT}{\perp}}
|
20 | 20 | \def\independenT#1#2{\mathrel{\rlap{$#1#2$}\mkern2mu{#1#2}}}
|
21 | 21 |
|
| 22 | +%mathbf |
| 23 | +\newcommand{\bfx}{{\mathbf{x}}} |
| 24 | +\newcommand{\bfy}{{\mathbf{y}}} |
| 25 | +\newcommand{\bfX}{{\mathbf{X}}} |
| 26 | + |
22 | 27 | %tab
|
23 | 28 | \newcommand\tab[1][1cm]{\hspace*{#1}}
|
24 | 29 |
|
|
29 | 34 | %blue
|
30 | 35 | \definecolor{azure}{rgb}{0.0, 0.5, 1.0}
|
31 | 36 | \newcommand{\blue}[1]{\textcolor{azure}{#1}}
|
| 37 | +%light-blue |
| 38 | +\definecolor{lightblue}{HTML}{00BFFF} |
| 39 | +\newcommand{\lightblue}[1]{\textcolor{lightblue}{#1}} |
32 | 40 | %purple
|
33 | 41 | \definecolor{deepfuchsia}{rgb}{0.76, 0.33, 0.76}
|
34 | 42 | \newcommand{\purple}[1]{\textcolor{deepfuchsia}{#1}}
|
|
71 | 79 | \def\bbR{{\mathbb R}}
|
72 | 80 | \def\bbC{{\mathbb C}}
|
73 | 81 |
|
| 82 | +\renewcommand{\P}{{\mathbb P}} |
| 83 | + |
74 | 84 |
|
75 | 85 | % probability notations
|
76 | 86 |
|
77 |
| -\newcommand{\E}{{\mathbb E}} |
78 |
| -\renewcommand{\P}{{\mathbb P}} |
79 | 87 | \newcommand{\A}{{\mathcal A}}
|
80 |
| -\newcommand{\F}{{\mathcal F}} |
81 | 88 | \newcommand{\B}{{\mathcal B}}
|
82 | 89 | \newcommand{\C}{{\mathcal C}}
|
83 |
| -\newcommand{\calF}{{\mathcal F}} |
84 |
| -\newcommand{\calB}{{\mathcal B}} |
85 |
| -\newcommand{\calG}{{\mathcal G}} |
86 |
| -\newcommand{\M}{{\mathcal M}} |
87 |
| -\newcommand{\calA}{{\mathcal A}} |
88 |
| -\newcommand{\calH}{{\mathcal H}} |
| 90 | +\newcommand{\E}{{\mathbb E}} |
| 91 | +\newcommand{\F}{{\mathcal F}} |
89 | 92 | \newcommand{\G}{{\mathcal G}}
|
90 | 93 | \renewcommand{\H}{{\mathcal H}}
|
91 | 94 | \newcommand{\I}{{\mathcal{I}}}
|
| 95 | +\newcommand{\M}{{\mathcal M}} |
| 96 | +\newcommand{\Q}{{\mathcal{Q}}} |
92 | 97 | \newcommand{\U}{{\mathcal{U}}}
|
93 | 98 | \newcommand{\X}{{\mathcal{X}}}
|
94 | 99 |
|
95 |
| -\newcommand{\bfx}{{\mathbf{x}}} |
96 |
| -\newcommand{\bfy}{{\mathbf{y}}} |
97 |
| -\newcommand{\bfX}{{\mathbf{X}}} |
| 100 | +\newcommand{\calP}{{\mathcal{P}}} |
| 101 | +\newcommand{\Var}{{\text{Var}}} |
98 | 102 |
|
99 | 103 | % greek letters
|
100 | 104 |
|
|
352 | 356 | \end{definition*}\vs
|
353 | 357 |
|
354 | 358 | \underline{Chapter 4, section 4.2} \\
|
| 359 | + |
| 360 | +The prune sampling algorithm is inspired by the MC-SAT algorithm [23]. \\ |
| 361 | + |
| 362 | +[23] Hoifung Poon and Pedro Domingos, \textit{Sound and efficient inference with probabilistic and deterministic dependencies}, AAAI, 2006, pp. 458-463. \\ |
| 363 | + |
355 | 364 | \begin{definition*}\textbf{ (Non-trivial steps of prune sampling) }
|
356 | 365 | \begin{enumerate}[1)]
|
357 | 366 | \item Generating an initial state
|
|
364 | 373 | \end{enumerate}
|
365 | 374 | \end{definition*}\vspace{2pc}
|
366 | 375 |
|
| 376 | +\begin{enumerate}[1)] |
| 377 | +\item How is an initial state created? $\mathbf{x}^{(0)} \leftarrow \text{initial}$\\ \\ |
| 378 | +\textit{Hybrid forward sampling:} \\ \\ |
| 379 | +Essentially \textit{forward sampling}, except that at each node $X_i$ either -- with probability $p$ -- the conditional distribution $\P(X_i \mid Pa(X_i) = \bfx)$ is sampled from, or -- with probability $1-p$ -- the uniform distribution over $\{x :\ \P(X_i \mid Pa(X_i) = \bfx) > 0 \}$ is used (a sketch follows this list). \\ \\ |
| 380 | + \texttt{ |
| 381 | +initial = hybrid\_fw(network, \lightblue{evidence} = evidence, \lightblue{num\_walks} = num\_walks0, \lightblue{prob} = prob0) \\ \\ |
| 382 | +} |
| 383 | +More intelligent ways to generate initial states are developed in [18]. \\ \\ |
| 384 | +[18] James D. Park, \textit{Using weighted MAX-SAT engines to solve MPE}, Eighteenth National Conference on Artificial Intelligence, 2002, pp. 682-687. \\ |
| 385 | +\item Sampling from $\U(S_{\C_{\mathbf{x,n}}})$ |
| 386 | +\begin{itemize} |
| 387 | +\item Assuming we have sufficient memory, a breadth-first search can be used to list all feasible states of the pruned BN. From this collection a state can easily be drawn uniformly (in comparison to Gibbs sampling, this uniform sampling step is relatively expensive). |
| 388 | +\item To reduce the computational effort, we propose to use random forward sampling to construct a set $S$ (of predetermined, fixed size) of feasible states of the pruned BN. Subsequently, a state can be sampled uniformly from $S$ (see the sketch after this list). |
| 389 | +\item A more intelligent method, based on \textit{simulated annealing}, is suggested in [28]. \\ |
| 390 | + |
| 391 | +[28] Wei Wei, Jordan Erenrich, and Bart Selman, \textit{Towards efficient sampling: Exploiting random walk strategies}, AAAI, 2004, pp. 670-676.
| 392 | +\end{itemize} |
| 393 | +\end{enumerate} |
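
As a rough illustration of steps 1) and 2) above, the following Python sketch combines hybrid forward sampling with a uniform draw from a fixed-size set $S$ of feasible states. The network encoding, the exact signature of \texttt{hybrid\_fw} and all numerical values are assumptions made for this note; only the sampling rule itself follows the description above.

\begin{verbatim}
import random

# Toy encoding (assumed, not the thesis code): node -> (parents, CPT), where
# the CPT maps a tuple of parent values to {value: probability}.  The numbers
# are the classic sprinkler example used later in these notes.
network = {
    "Rain":      ((), {(): {True: 0.2, False: 0.8}}),
    "Sprinkler": (("Rain",), {(True,):  {True: 0.01, False: 0.99},
                              (False,): {True: 0.4,  False: 0.6}}),
    "GrassWet":  (("Sprinkler", "Rain"),
                  {(True, True):   {True: 0.99, False: 0.01},
                   (True, False):  {True: 0.9,  False: 0.1},
                   (False, True):  {True: 0.8,  False: 0.2},
                   (False, False): {True: 0.0,  False: 1.0}}),
}
order = ["Rain", "Sprinkler", "GrassWet"]   # a topological ordering

def hybrid_fw(network, evidence, prob):
    """One hybrid forward-sampling pass: at each non-evidence node, with
    probability `prob` sample from P(X_i | Pa(X_i) = x), otherwise sample
    uniformly from the support {x : P(X_i | Pa(X_i) = x) > 0}."""
    state = dict(evidence)                  # evidence nodes are kept fixed
    for node in order:
        if node in state:
            continue
        parents, cpt = network[node]
        dist = cpt[tuple(state[p] for p in parents)]
        if random.random() < prob:
            values, weights = zip(*dist.items())
            state[node] = random.choices(values, weights=weights)[0]
        else:
            support = [v for v, q in dist.items() if q > 0.0]
            state[node] = random.choice(support)
    return state

# Step 2), reduced-effort variant: build a fixed-size set S of feasible states
# by repeated forward sampling, then draw one state uniformly from S.
S = {tuple(sorted(hybrid_fw(network, {"GrassWet": True}, prob=0.7).items()))
     for _ in range(100)}
initial = dict(random.choice(list(S)))
\end{verbatim}

In the actual algorithm the uniform draw is from $\U(S_{\C_{\mathbf{x,n}}})$, the feasible states of the pruned network; the set-based construction above only mirrors the reduced-effort variant of the second bullet.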
| 394 | + |
| 395 | +\vspace{2pc} |
367 | 396 | \makebox[\linewidth]{\rule{\textwidth}{0.6pt}} \\
|
368 | 397 | \textbf{Prune Sampling Algorithm} \\
|
369 | 398 | \makebox[\linewidth]{\rule{\textwidth}{0.4pt}} \\
|
370 | 399 | \texttt{
|
371 | 400 | \blue{def} sample\_states(data\_str\_node, col\_index) \\ \\
|
372 | 401 | data\_str\_node = data\_str[node] \\ \\
|
373 | 402 | data\_str = generate\_data\_str(network, ev, node\_list) \\ \\
|
374 |
| -\blue{def} generate\_data\_str(network, evidence, node\_list)\\ \\ |
375 |
| -network, evidence} is input \\ \\ |
| 403 | +} \\ |
| 404 | +\makebox[\linewidth]{\rule{\textwidth}{0.4pt}} \\ |
| 405 | + |
376 | 406 | \texttt{
|
377 |
| -node\_list = [] \\ \\ |
378 |
| -\purple{for} i \blue{in} \blond{range}(num\_levels):\\ |
379 |
| -\tab node\_list = node\_list + level\_sets[i] \\ \\ |
380 |
| -level\_sets = create\_level\_sets(network) \\ \\ |
381 |
| -\blue{def} \blond{create\_level\_sets}(\blue{network}) \\ |
382 |
| -r.488 |
383 |
| -} |
| 407 | +\blue{def} \blond{prune\_sampling} \\ \\ |
| 408 | +\tab data\_str = generate\_data\_structure(...) \\ |
| 409 | +\tab \blue{def} \blond{generate\_data\_structure} \\ |
| 410 | +\tab \tab \blue{def} \blond{create\_cpt\_shifts} \\ \\ |
| 411 | +\tab \purple{if} heuristic == 1 \\ |
| 412 | +\tab \blue{def} \blond{random\_fw\_heuristic} \\ |
| 413 | +\tab \tab \blue{def} \blond{random\_walk} \\ |
| 414 | +\tab \tab \tab \blue{def} \blond{depth} \\ |
| 415 | +\tab \tab \tab \tab data\_str\_node = data\_str[node] \\ |
| 416 | +\tab \tab \tab \tab shifts = data\_str\_node['shifts'] \\ \\ |
| 417 | +\tab \purple{else} \\ |
| 418 | +\tab \blue{def} \blond{bfs\_exhaust} \\ |
| 419 | +\tab \tab \blue{def} \blond{depth} \\ |
| 420 | +\tab \tab \tab shifts = data\_str\_node['shifts'] \\ |
| 421 | +\tab \tab \tab pstates\_node = sample\_states(data\_str\_node, new\_col) |
| 422 | +} \vspace{2pc} |
| 423 | + |
| 424 | +Prune sampling algorithm |
| 425 | +\begin{itemize} |
| 426 | +\item Performance of prune sampling algorithm |
| 427 | +\begin{itemize} |
| 428 | +\item Convergence of prior distribution $\Q$ to real distribution $\calP$ |
| 429 | +\item Weight of BN |
| 430 | +\item Complexity |
| 431 | +\item Methods to compare when varying the dag-treewidth and the query nodes |
| 432 | +\begin{itemize} |
| 433 | +\item Forward sampling |
| 434 | +\item Metropolis sampling |
| 435 | +\item Gibbs sampling |
| 436 | +\item Prune sampling |
| 437 | +\begin{itemize} |
| 438 | +\item bfs\_exhaust |
| 439 | +\item hybrid\_fw\_heuristics |
| 440 | +\end{itemize} |
| 441 | +\end{itemize} |
| 442 | +\end{itemize} |
| 443 | +\item Suggested Improvements |
| 444 | +\begin{itemize} |
| 445 | +\item Implementation |
| 446 | +\item Results |
| 447 | +\end{itemize} |
| 448 | +\item Complexity of Bayesian networks |
| 449 | +\begin{itemize} |
| 450 | +\item Applications of Bayesian networks |
| 451 | +\begin{itemize} |
| 452 | +\item Medicine |
| 453 | +\item Finance |
| 454 | +\end{itemize} |
| 455 | +\end{itemize} |
| 456 | +\end{itemize} |
| 457 | + |
| 458 | +\newpage |
| 459 | + |
| 460 | +\section{Monte Carlo error analysis} |
384 | 461 |
|
| 462 | +\begin{align*} |
| 463 | +\sigma^2 &= \langle y^2 \rangle - \langle y \rangle ^2 \\ |
| 464 | +\langle y \rangle &= \frac{1}{N} \sum_{i=1}^N y_i \\ |
| 465 | +\langle y^2 \rangle &= \frac{1}{N} \sum_{i=1}^N y_i^2 \\ |
| 466 | +\sigma_N &= \sqrt{ \frac{1}{N} \sum_{i=1}^N ( y_i - \bar{y}_N )^2 } \\ |
| 467 | +\log \sigma_N &= \frac{1}{2} \log \big( \frac{1}{N} \sum_{i=1}^N ( y_i - \bar{y}_N )^2 \big) |
| 468 | + \end{align*} \vspace{3pc} |
| 469 | + |
| 470 | +Let $Y$ have finite variance, $\Var(Y)=\sigma^2 < \infty$. In IID sampling, the estimate $\hat{\mu}_n = \frac{1}{n} \sum_{i=1}^n Y_i$ is itself a random variable, with its own mean and variance. The mean of $\hat{\mu}_n$ is |
| 471 | +\begin{align*} |
| 472 | +\E[\hat{\mu}_n] = \frac{1}{n} \sum_{i=1}^n \E[Y_i] = \mu |
| 473 | +\end{align*} |
| 474 | +The variance of $\hat{\mu}_n$ is, by independence of the $Y_i$, |
| 475 | +\begin{align*} |
| 476 | +\E[(\hat{\mu}_n - \mu)^2 ] = \frac{1}{n^2} \sum_{i=1}^n \Var(Y_i) = \frac{\sigma^2}{n}. |
| 477 | +\end{align*} |
| 478 | +This gives $\sqrt{\E[(\hat{\mu}_n - \mu)^2 ]} = \sigma / \sqrt{n}$. To emphasize that the error is of order $1/\sqrt{n}$ and to de-emphasize $\sigma$, we write the root mean squared error as RMSE $= O(n^{-1/2})$ as $n \to \infty$. |
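
A quick numerical check of this $n^{-1/2}$ behaviour (illustrative Python only; the Gaussian choice for $Y$ and all constants are assumptions):

\begin{verbatim}
import math
import random

# Empirical RMSE of the sample mean for increasing n, compared with the
# theoretical value sigma / sqrt(n).
random.seed(0)
mu, sigma = 1.0, 2.0          # assumed true mean and standard deviation of Y
reps = 200                    # independent replications of the estimator
for n in [10, 100, 1000, 10000]:
    sq_err = 0.0
    for _ in range(reps):
        mu_hat = sum(random.gauss(mu, sigma) for _ in range(n)) / n
        sq_err += (mu_hat - mu) ** 2
    rmse = math.sqrt(sq_err / reps)
    print(f"n={n:6d}  RMSE={rmse:.4f}  sigma/sqrt(n)={sigma / math.sqrt(n):.4f}")
\end{verbatim}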
| 479 | + |
| 480 | +\vspace{3pc} |
385 | 481 |
|
| 482 | + |
| 483 | +\begin{align*} |
| 484 | +\P(Rain = T \mid GrassWet = T) &= \frac{\P(Rain = T, GrassWet = T)}{\P(GrassWet = T)} \\ |
| 485 | +&= \frac{\sum_{S \in \{T,F\}} \P(Rain = T, Sprinkler, GrassWet = T)}{\sum_{R,S \in \{T,F\}} \P(GrassWet = T, Rain, Sprinkler)} \\ |
| 486 | +&= \frac{0.00198_{TTT} + 0.1584_{TFT}}{0.00198_{TTT} + 0.288_{TTF} + 0.1584_{TFT} + 0.0_{TFF}} \\ |
| 487 | +&\approx 0.3577 \\ |
| 488 | +&= 1 - 0.6423 |
| 489 | +\end{align*}\vs |
| 490 | + |
| 491 | +\begin{align*} |
| 492 | +\P(GrassWet = T, Rain = T, Sprinkler = T) = &\P(GrassWet = T \mid Rain = T, Sprinkler = T) \cdot \\ |
| 493 | +&\P(Sprinkler = T \mid Rain = T) \cdot \P(Rain = T) \\ |
| 494 | += & 0.99 \cdot 0.01 \cdot 0.2 \\ |
| 495 | += & 0.00198 |
| 496 | +\end{align*} \vs |
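
The value above can be checked by brute-force enumeration of the joint distribution. In the Python sketch below the full conditional probability tables are assumed from the standard sprinkler example; only the factors $0.99$, $0.01$ and $0.2$ appear explicitly in these notes.

\begin{verbatim}
from itertools import product

# Conditional probability tables of the sprinkler network (assumed values).
p_rain      = {True: 0.2, False: 0.8}
p_sprinkler = {True:  {True: 0.01, False: 0.99},        # keyed by Rain
               False: {True: 0.4,  False: 0.6}}
p_grasswet  = {(True, True): 0.99, (True, False): 0.9,  # keyed by (Sprinkler, Rain)
               (False, True): 0.8, (False, False): 0.0}

num = den = 0.0
for rain, sprinkler in product([True, False], repeat=2):
    # P(GrassWet=T, Sprinkler=sprinkler, Rain=rain) via the chain rule
    joint = p_grasswet[(sprinkler, rain)] * p_sprinkler[rain][sprinkler] * p_rain[rain]
    den += joint
    if rain:
        num += joint
print(num / den)   # approximately 0.3577
\end{verbatim}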
386 | 497 |
|
387 | 498 | \newpage
|
388 | 499 |
|
|
397 | 508 | [28] Wei Wei, Jordan Erenrich and Bart Selman, \textit{Towards efficient sampling: Exploiting random walk strategies}, AAAI, 2004, pp. 670-676
|
398 | 509 | \end{definition*}\vs
|
399 | 510 |
|
| 511 | +New literature: |
| 512 | +\begin{itemize} |
| 513 | +\item \textit{Bayesian Networks and Decision Graphs}, Finn V. Jensen and Thomas D. Nielsen |
| 514 | +\item \textit{Probabilistic Graphical Models}, Luis Enrique Sucar |
| 515 | +\end{itemize} |
| 516 | + |
| 517 | + |
400 | 518 | \end{document}
|