A Summary of Writing Pseudocode in LaTeX
algorithmicx example
Corresponding code:
\documentclass[11pt]{ctexart}
\usepackage[top=2cm, bottom=2cm, left=2cm, right=2cm]{geometry}
\usepackage{algorithm}
\usepackage{algorithmicx}
\usepackage{algpseudocode}
\usepackage{amsmath}

\floatname{algorithm}{算法}
\renewcommand{\algorithmicrequire}{\textbf{输入:}}
\renewcommand{\algorithmicensure}{\textbf{输出:}}

\begin{document}
\begin{algorithm}
\caption{用归并排序求逆序数}
\begin{algorithmic}[1] % the [1] prints a line number on every line
\Require $Array$数组,$n$数组大小
\Ensure 逆序数
\Function {MergerSort}{$Array, left, right$}
\State $result \gets 0$
\If {$left < right$}
\State $middle \gets (left + right) / 2$
\State $result \gets result +$ \Call{MergerSort}{$Array, left, middle$}
\State $result \gets result +$ \Call{MergerSort}{$Array, middle, right$}
\State $result \gets result +$ \Call{Merger}{$Array,left,middle,right$}
\EndIf
\State \Return{$result$}
\EndFunction
\State
\Function{Merger}{$Array, left, middle, right$}
\State $i\gets left$
\State $j\gets middle$
\State $k\gets 0$
\State $result \gets 0$
\While{$i<middle$ \textbf{and} $j<right$}
\If{$Array[i]<Array[j]$}
\State $B[k++]\gets Array[i++]$
\Else
\State $B[k++] \gets Array[j++]$
\State $result \gets result + (middle - i)$
\EndIf
\EndWhile
\While{$i<middle$}
\State $B[k++] \gets Array[i++]$
\EndWhile
\While{$j<right$}
\State $B[k++] \gets Array[j++]$
\EndWhile
\For{$i = 0 \to k-1$}
\State $Array[left + i] \gets B[i]$
\EndFor
\State \Return{$result$}
\EndFunction
\end{algorithmic}
\end{algorithm}
\end{document}
algorithm examples
Preliminary setup
\usepackage{algorithm}
\usepackage{algpseudocode}
\usepackage{amsmath}
\renewcommand{\algorithmicrequire}{\textbf{Input:}} % Use Input in the format of Algorithm
\renewcommand{\algorithmicensure}{\textbf{Output:}} % Use Output in the format of Algorithm
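For reference, a minimal complete document built on this preamble might look like the sketch below; the Euclid procedure is only an illustration and is not taken from the examples that follow.

\documentclass{article}
\usepackage{algorithm}
\usepackage{algpseudocode}
\usepackage{amsmath}
\renewcommand{\algorithmicrequire}{\textbf{Input:}}
\renewcommand{\algorithmicensure}{\textbf{Output:}}

\begin{document}
\begin{algorithm}
\caption{Euclid's algorithm (illustrative)}
\begin{algorithmic}[1]
\Require nonnegative integers $a$, $b$
\Ensure $\gcd(a, b)$
\While{$b \neq 0$}
\State $r \gets a \bmod b$
\State $a \gets b$
\State $b \gets r$
\EndWhile
\State \Return{$a$}
\end{algorithmic}
\end{algorithm}
\end{document}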
example 1
\begin{algorithm}[htb]
\caption{Framework of ensemble learning for our system.}
\label{alg:Framwork}
\begin{algorithmic}[1]
\Require
The set of positive samples for the current batch, $P_n$;
The set of unlabelled samples for the current batch, $U_n$;
Ensemble of classifiers on former batches, $E_{n-1}$;
\Ensure
Ensemble of classifiers on the current batch, $E_n$;
\State Extracting the set of reliable negative and/or positive samples $T_n$ from $U_n$ with help of $P_n$;
\label{code:fram:extract}
\State Training ensemble of classifiers $E$ on $T_n \cup P_n$, with help of data in former batches;
\label{code:fram:trainbase}
\State $E_n=E_{n-1} \cup E$;
\label{code:fram:add}
\State Classifying samples in $U_n-T_n$ by $E_n$;
\label{code:fram:classify}
\State Deleting some weak classifiers in $E_n$ so as to keep the capacity of $E_n$;
\label{code:fram:select} \\
\Return $E_n$;
\end{algorithmic}
\end{algorithm}
example 2
\begin{algorithm}[h]
\caption{An example of For \& While loop formatting in an algorithm}
\begin{algorithmic}[1]
\For{each $i\in [1,9]$}
\State initialize a tree $T_{i}$ with only a leaf (the root);
\State $T=T\cup T_{i}$;
\EndFor
\ForAll{$c$ such that $c\in RecentMBatch(E_{n-1})$}
\label{code:TrainBase:getc}
\State $T=T\cup PosSample(c)$;
\label{code:TrainBase:pos}
\EndFor
\For{$i=1$; $i<n$; $i++$}
\State $//$ Your source here;
\EndFor
\For{$i=1$ to $n$}
\State $//$ Your source here;
\EndFor
\State $//$ Reusing recent base classifiers.
\label{code:recentStart}
\While{$(|E_n| \leq L_1)$ \textbf{and} $(D \neq \emptyset)$}
\State Selecting the most recent classifier $c_i$ from $D$;
\State $D=D-c_i$;
\State $E_n=E_n+c_i$;
\EndWhile
\label{code:recentEnd}
\end{algorithmic}
\end{algorithm}
example 3
Code:
\begin{algorithm}[h]
\caption{Conjugate Gradient Algorithm with Dynamic Step-Size Control}
\label{alg::conjugateGradient}
\begin{algorithmic}[1]
\Require
$f(x)$: objective function;
$x_0$: initial solution;
$s$: step size;
\Ensure
optimal $x^{*}$
\State initialize $g_0=0$ and $d_0=0$;
\Repeat
\State compute gradient directions $g_k=\nabla f(x_k)$;
\State compute Polak-Ribiere parameter $\beta_k=\frac{g_k^{T}(g_k-g_{k-1})}{\parallel g_{k-1} \parallel^{2}}$;
\State compute the conjugate directions $d_k=-g_k+\beta_k d_{k-1}$;
\State compute the step size $\alpha_k=s/\parallel d_k \parallel_{2}$;
\Until{($f(x_k)>f(x_{k-1})$)}
\end{algorithmic}
\end{algorithm}
example 4
Code (the \BState command defined below produces a statement set flush with the current block's left margin; it is used here for the top: and loop: labels):
\makeatletter
\def\BState{\State\hskip-\ALG@thistlm}
\makeatother
\begin{algorithm}
\caption{My algorithm}\label{euclid}
\begin{algorithmic}[1]
\Procedure{MyProcedure}{}
\State $\textit{stringlen} \gets \text{length of }\textit{string}$
\State $i \gets \textit{patlen}$
\BState \emph{top}:
\If {$i > \textit{stringlen}$} \Return false
\EndIf
\State $j \gets \textit{patlen}$
\BState \emph{loop}:
\If {$\textit{string}(i) = \textit{path}(j)$}
\State $j \gets j-1$.
\State $i \gets i-1$.
\State \textbf{goto} \emph{loop}.
\State \textbf{close};
\EndIf
\State $i \gets i+\max(\textit{delta}_1(\textit{string}(i)),\textit{delta}_2(j))$.
\State \textbf{goto} \emph{top}.
\EndProcedure
\end{algorithmic}
\end{algorithm}
algorithm2e examples
The algorithm2e package can conflict with other packages; a common symptom is the error message "Too many }'s". To avoid this, add the following commands before loading algorithm2e:
\makeatletter
\newif\if@restonecol
\makeatother
\let\algorithm\relax
\let\endalgorithm\relax
So the full preliminary setup is:
\makeatletter
\newif\if@restonecol
\makeatother
\let\algorithm\relax
\let\endalgorithm\relax
\usepackage[linesnumbered,ruled,vlined]{algorithm2e} % or [ruled,vlined]
\usepackage{algpseudocode}
\usepackage{amsmath}
\renewcommand{\algorithmicrequire}{\textbf{Input:}} % Use Input in the format of Algorithm
\renewcommand{\algorithmicensure}{\textbf{Output:}} % Use Output in the format of Algorithm
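As a sanity check, a minimal complete document using this setup might look like the sketch below; the binary-search algorithm is purely illustrative and is not one of the examples that follow.

\documentclass{article}
\usepackage{amsmath}
\makeatletter
\newif\if@restonecol
\makeatother
\let\algorithm\relax
\let\endalgorithm\relax
\usepackage[linesnumbered,ruled,vlined]{algorithm2e}

\begin{document}
\begin{algorithm}
\caption{Binary search (illustrative)}
\KwIn{sorted array $A[1..n]$, target value $x$}
\KwOut{an index $mid$ with $A[mid]=x$, or $0$ if $x$ is not present}
$lo=1$, $hi=n$\;
\While{$lo \le hi$}
{
$mid=\lfloor (lo+hi)/2 \rfloor$\;
\uIf{$A[mid]=x$}
{
\KwRet{$mid$}\;
}
\uElseIf{$A[mid]<x$}
{
$lo=mid+1$\;
}
\Else
{
$hi=mid-1$\;
}
}
\KwRet{$0$}\;
\end{algorithm}
\end{document}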
example 1
Code:
\begin{algorithm}
\caption{Identify Row Context}
\KwIn{$r_i$, $Backgrd(T_i)=\{T_1,T_2,\ldots,T_n\}$ and similarity threshold $\theta_r$}
\KwOut{$con(r_i)$}
$con(r_i)=\emptyset$\;
\For{$j=1;j \le n;j \ne i$}
{
float $maxSim=0$\;
$r^{maxSim}=null$\;
\While{not end of $T_j$}
{
compute Jaro($r_i,r_m$) ($r_m\in T_j$)\;
\If{$(Jaro(r_i,r_m) \ge \theta_r)\wedge (Jaro(r_i,r_m)\ge maxSim)$}
{
replace $r^{maxSim}$ with $r_m$ and update $maxSim$\;
}
}
$con(r_i)=con(r_i)\cup \{r^{maxSim}\}$\;
}
return $con(r_i)$\;
\end{algorithm}
example 2
\begin{algorithm}
\caption{Service checkpoint image storage node and routing path selection}
\LinesNumbered
\KwIn{host server $PM_s$ that $SerImg_k$ is fetched from, $subnet_s$ that $PM_s$ belongs to, $pod_s$ that $PM_s$ belongs to}
\KwOut{Service image storage server $storageserver$, and the image transfer path $path$}
$storageserver$ = Storage node selection($PM_s$, $SerImg_k$, $subnet_s$, $pod_s$)\;
\If{$storageserver$ $\neq$ null}
{
select a path from $storageserver$ to $PM_s$ and assign the path to $path$\;
}

\textbf{final} \;
\textbf{return} $storageserver$ and $path$;
\end{algorithm}
example 3
\begin{algorithm}
\caption{Storage node selection}
\LinesNumbered
\KwIn{host server $PM_s$ that the checkpoint image $Img$ is fetched from, $subnet_s$ that $PM_s$ belongs to, $pod_s$ that $PM_s$ belongs to}
\KwOut{Image storage server $storageserver$}
\For{ each host server $PM_i$ in the same subnet with $PM_s$ }
{
\If{ $PM_i$ is not a service providing node or checkpoint image storage node of $S_k$ }
{
add $PM_i$ to $candidateList$ \;
}
}
sort $candidateList$ by reliability desc\;
init $storageserver$ ;
\For{ each $PM_k$ in $candidateList$}
{
\If{ $SP(PM_k)$ $\geq$ $E(SP)$ of $pod_i$ and $BM_k$ $\le$ size of $Img$ }
{
assign $PM_k$ to $storageserver$\;
goto final\;
}
}
clear $candidateList$\;
add all other subnets in $pod_s$ to $netList$\;
\For{ each subnet $subnet_j$ in $netList$}
{
clear $candidateList$\;
\For {each $PM_i$ in $subnet_j$ }
{
\If{ $PM_i$ is not a service providing node or checkpoint image storage node of $S_k$ }
{
add $PM_i$ to $candidateList$\;
}
}
sort all host in $candidateList$ by reliability desc\;
\For{ each $PM_k$ in $candidateList$}
{
\If{$SP(PM_k)$ $\geq$ $E(SP)$ of $pod_i$ and $BM_k$ $\le$ size of $Img$}
{
assign $PM_k$ to $storageserver$ \;
goto final\;
}
}
}
\textbf{final} \;
\textbf{return} $storageserver$;
\end{algorithm}
example 4
Code:
\begin{algorithm}
\caption{Delta checkpoint image storage node and routing path selection}
\LinesNumbered
\KwIn{host server $PM_s$ that generates the delta checkpoint image $DImg_{kt}$, $subnet_s$ that $PM_s$ belongs to, $pod_s$ that $PM_s$ belongs to}
\KwOut{Delta image storage server $storageserver$, and the image transfer path $path$}
$storageserver$ = Storage node selection($PM_s$, $DImg_{kt}$, $subnet_s$, $pod_s$)\;
\If{ $storageserver$ $=$ null}
{
the delta checkpoint image is stored in the central storage server\;
goto final\;
}
construct weighted topological graph $graph_s$ of $pod_s$\;
calculate the shortest path from $storageserver$ to $PM_s$ in $graph_s$ by using the Dijkstra algorithm\;
\textbf{final} \;
\textbf{return} $storageserver$ and $path$;
\end{algorithm}
example 5
\documentclass[8pt,twocolumn]{ctexart}
\usepackage{amssymb}
\usepackage{bm}
\usepackage{textcomp} % provides \textacutedbl, used here as a double-prime (second-derivative) symbol

% Page length commands go here in the preamble
\setlength{\oddsidemargin}{-0.25in} % Left margin of 1 in + 0 in = 1 in
\setlength{\textwidth}{9in} % text width; Right margin of 8.5 in - 1 in - 6.5 in = 1 in
\setlength{\topmargin}{-.75in} % Top margin of 2 in -0.75 in = 1 in
\setlength{\textheight}{9.2in} % Lower margin of 11 in - 9 in - 1 in = 1 in
\setlength{\parindent}{0in}


\makeatletter
\newif\if@restonecol
\makeatother
\let\algorithm\relax
\let\endalgorithm\relax
\usepackage[linesnumbered,ruled,vlined]{algorithm2e} % or [ruled,vlined]
\usepackage{algpseudocode}
\renewcommand{\algorithmicrequire}{\textbf{Input:}}
\renewcommand{\algorithmicensure}{\textbf{Output:}}

\begin{document}

\begin{algorithm}
\caption{component matrices computing}
\LinesNumbered
\KwIn{$\mathcal{X}\in\mathbb{R}^{l_1\times l_2\times\cdots\times l_N},\varepsilon,\lambda,\delta,R$}
\KwOut{$A^{(j)}$s for $j=1$ to $N$}
\textbf{Initialize} all $A^{(j)}$s //which can be seen as the $0^{th}$ round of iterations\;

{$l$\hspace*{-1pt}\textacutedbl}$=L$ //if we need to judge whether $(11)$ is true then {$l$\hspace*{-1pt}\textacutedbl} denotes $L|_{t-1}$\;

\For{ each $A_{i_jr}^{(j)}(1\le j\le N,1\le i_j\le I_j,1\le r\le R)$ }
{//$1^{st}$ round of iterations\;
$g_{i_jr}^{(j)'}=g_{i_jr}^{(j)}$\;
$A_{i_jr}^{(j)'}=A_{i_jr}^{(j)}$ //if the rollback shown as $(12)$ is needed, $A_{i_jr}^{(j)'}$ denotes $A_{i_jr}^{(j)}|_{t-1}$\;
$A_{i_jr}^{(j)}=A_{i_jr}^{(j)}-\mathrm{{\bf sign}}\left(g_{i_jr}^{(j)}\right)\cdot\delta_{i_jr}^{(j)}$\;
}

\Repeat(//other rounds of iterations for computing component matrices){$\bm{L\le \varepsilon}$ or maximum iterations exhausted}
{
$l'=L$ //if we need to judge whether $(11)$ is true then $l'$ denotes $L|_t$\;
\For{ each $A_{i_jr}^{(j)}(1\le j\le N,1\le i_j\le I_j,1\le r\le R)$}
{
\If{$g_{i_jr}^{(j)}\cdot g_{i_jr}^{(j)'}>0$}
{
$A_{i_jr}^{(j)'}=A_{i_jr}^{(j)}$\;
$g_{i_jr}^{(j)'}=g_{i_jr}^{(j)}$\;
$\delta_{i_jr}^{(j)}=\bm{\min}\left(\delta_{i_jr}^{(j)}\cdot\eta^{+},Max\_Step\_Size\right)$\;
$A_{i_jr}^{(j)}=A_{i_jr}^{(j)}-\mathrm{{\bf sign}}\left(g_{i_jr}^{(j)}\right)\cdot\delta_{i_jr}^{(j)}$\;
}
\ElseIf{$g_{i_jr}^{(j)}\cdot g_{i_jr}^{(j)'}<0$}
{
\If{$l'>l$\hspace*{-1pt}\textacutedbl}
{
$g_{i_jr}^{(j)'}=g_{i_jr}^{(j)}$\;
$A_{i_jr}^{(j)}=A_{i_jr}^{(j)'}$ // if $(11)$ is true then rollback as $(12)$\;
$\delta_{i_jr}^{(j)}=\bm{\max}\left(\delta_{i_jr}^{(j)}\times\eta^{-},Min\_Step\_Size\right)$\;
}
\Else
{
$A_{i_jr}^{(j)'}=A_{i_jr}^{(j)}$\;
$g_{i_jr}^{(j)'}=g_{i_jr}^{(j)}$\;
$\delta_{i_jr}^{(j)}=\bm{\max}\left(\delta_{i_jr}^{(j)}\cdot\eta^{-},Min\_Step\_Size\right)$\;
$A_{i_jr}^{(j)}=A_{i_jr}^{(j)}-\mathrm{{\bf sign}}\left(g_{i_jr}^{(j)}\right)\cdot\delta_{i_jr}^{(j)}$\;
}
}
\Else
{
$A_{i_jr}^{(j)'}=A_{i_jr}^{(j)}$\;
$g_{i_jr}^{(j)'}=g_{i_jr}^{(j)}$\;
$A_{i_jr}^{(j)}=A_{i_jr}^{(j)}-\mathrm{{\bf sign}}\left(g_{i_jr}^{(j)}\right)\cdot\delta_{i_jr}^{(j)}$\;
}
}
$l$\hspace*{-1pt}\textacutedbl$=l'$\;
}
\end{algorithm}
\end{document}
example 6
Code:
\usepackage[ruled,linesnumbered]{algorithm2e}
\usepackage{amsmath}
\usepackage{bm} % needed for the \bm commands below

\begin{algorithm}
\caption{Learning algorithm of R2P}
\label{alg:r2p}
\KwIn{ratings $R$, joint demographic representations $Y$, learning rate $\eta$, maximum iterative number $maxIter$, negative sampling number $k$\;}
\KwOut{interaction matrix $\bm{W}$, movie vectors $V$\;}
Initialize $\bm{W},V$ randomly\;
$t = 0$\;
For convenience, define $\vec{\varphi}_n = \sum_{m\in S_n}r_{m,n}\vec{v}_m$\; %\varphi_n\bm{W}\vec{y}_n
\While{not converged \textbf{and} $t \le maxIter$}
{
$t = t+1$\;
\For{$n=1;n \le N;n++$}
{
$\bm{W} = \bm{W}+\eta\big(1-\sigma\left(\vec{\varphi}_n^T\bm{W}\vec{y}_n\right)\big)\vec{\varphi}_n\vec{y}_n^T$\;\label{algline:W}
\For{$m\in S_n$}
{
$\vec{v}_m=\vec{v}_m+ \eta\left(1-\sigma\left(\vec{\varphi}_n^T\bm{W}\vec{y}_n\right)\right)r_{m,n}\bm{W}\vec{y}_n$\;\label{algline:V}
}
\For{$i=1;i\le k;i++$}
{
sample negative sample $\vec{y}_i$ from $P_n$\;
$\bm{W} = \bm{W}-\eta\big(1-\sigma\left(-\vec{\varphi}_n^T\bm{W}\vec{y}_n\right)\big)\vec{\varphi}_n\vec{y}_i^T$\;
\For{$m\in S_n$}
{
$\vec{v}_m=\vec{v}_m- \eta\left(1-\sigma\left(-\vec{\varphi}_n^T\bm{W}\vec{y}_n\right)\right)r_{m,n}\bm{W}\vec{y}_i$\;
}
}
}
$\bm{W} = \bm{W}-2\lambda\eta\bm{W}$\;
$V=V-2\lambda\eta V$\;
}
return $\bm{W},V$\;
\end{algorithm}