2020-10-19 21:08:55

This commit is contained in:
wizardforcel
2020-10-19 21:08:55 +08:00
parent 7f63048035
commit ab0caba1f0
140 changed files with 3982 additions and 3982 deletions

View File

@@ -2,7 +2,7 @@
In [1]:
```
```py
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
@@ -13,7 +13,7 @@ import matplotlib.pyplot as plt
In [2]:
```
```py
import theano
# 一般都把 `tensor` 子模块导入并命名为 T
@@ -21,7 +21,7 @@ import theano.tensor as T
```
```
```py
Using gpu device 1: Tesla K10.G2.8GB (CNMeM is disabled)
```
@@ -36,19 +36,19 @@ Using gpu device 1: Tesla K10.G2.8GB (CNMeM is disabled)
In [3]:
```
```py
foo = T.scalar('x')
```
In [4]:
```
```py
print foo
```
```
```py
x
```
@@ -57,14 +57,14 @@ x
In [5]:
```
```py
bar = foo ** 2
print bar
```
```
```py
Elemwise{pow,no_inplace}.0
```
@@ -75,12 +75,12 @@ Elemwise{pow,no_inplace}.0
In [6]:
```
```py
print theano.pp(bar)
```
```
```py
(x ** TensorConstant{2})
```
@@ -89,13 +89,13 @@ print theano.pp(bar)
In [7]:
```
```py
print type(foo)
print foo.type
```
```
```py
<class 'theano.tensor.var.TensorVariable'>
TensorType(float32, scalar)
@@ -105,7 +105,7 @@ TensorType(float32, scalar)
有了符号变量,自然可以用符号变量来定义函数,`theano.function()` 函数用来生成符号函数:
```
```py
theano.function(input, output)
```
@@ -115,7 +115,7 @@ theano.function(input, output)
In [8]:
```
```py
square = theano.function([foo], bar)
```
@@ -124,12 +124,12 @@ square = theano.function([foo], bar)
In [9]:
```
```py
print square(3)
```
```
```py
9.0
```
@@ -138,12 +138,12 @@ print square(3)
In [10]:
```
```py
print bar.eval({foo: 3})
```
```
```py
9.0
```
@@ -184,14 +184,14 @@ print bar.eval({foo: 3})
除此之外,还可以用它们的复数形式一次定义多个符号变量:
```
```py
x,y,z = T.vectors('x','y','z')
x,y,z = T.vectors(3)
```
In [11]:
```
```py
A = T.matrix('A')
x = T.vector('x')
b = T.vector('b')
@@ -202,7 +202,7 @@ b = T.vector('b')
In [12]:
```
```py
y = T.dot(A, x) + b
```
@@ -211,7 +211,7 @@ y = T.dot(A, x) + b
In [13]:
```
```py
z = T.sum(A**2)
```
@@ -220,7 +220,7 @@ z = T.sum(A**2)
In [14]:
```
```py
linear_mix = theano.function([A, x, b],
[y, z])
@@ -230,7 +230,7 @@ linear_mix = theano.function([A, x, b],
$$ A = \begin{bmatrix} 1 & 2 & 3 \\ 4 & 5 & 6 \end{bmatrix}, x = \begin{bmatrix} 1 \\ 2 \\ 3 \end{bmatrix}, b = \begin{bmatrix} 4 \\ 5 \end{bmatrix} $$In [15]:
```
```py
print linear_mix(np.array([[1, 2, 3],
[4, 5, 6]], dtype=theano.config.floatX), #A
np.array([1, 2, 3], dtype=theano.config.floatX), #x
@@ -238,7 +238,7 @@ print linear_mix(np.array([[1, 2, 3],
```
```
```py
[array([ 18., 37.], dtype=float32), array(91.0, dtype=float32)]
```
@@ -249,7 +249,7 @@ print linear_mix(np.array([[1, 2, 3],
In [16]:
```
```py
linear_mix_default = theano.function([A, x, theano.Param(b, default=np.zeros(2, dtype=theano.config.floatX))],
[y, z])
@@ -259,14 +259,14 @@ linear_mix_default = theano.function([A, x, theano.Param(b, default=np.zeros(2,
In [17]:
```
```py
print linear_mix_default(np.array([[1, 2, 3],
[4, 5, 6]], dtype=theano.config.floatX), #A
np.array([1, 2, 3], dtype=theano.config.floatX)) #x
```
```
```py
[array([ 14., 32.], dtype=float32), array(91.0, dtype=float32)]
```
@@ -275,7 +275,7 @@ print linear_mix_default(np.array([[1, 2, 3],
In [18]:
```
```py
print linear_mix_default(np.array([[1, 2, 3],
[4, 5, 6]], dtype=theano.config.floatX), #A
np.array([1, 2, 3], dtype=theano.config.floatX), #x
@@ -283,7 +283,7 @@ print linear_mix_default(np.array([[1, 2, 3],
```
```
```py
[array([ 18., 37.], dtype=float32), array(91.0, dtype=float32)]
```
@@ -294,14 +294,14 @@ print linear_mix_default(np.array([[1, 2, 3],
In [19]:
```
```py
shared_var = theano.shared(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=theano.config.floatX))
print shared_var.type
```
```
```py
CudaNdarrayType(float32, matrix)
```
@@ -310,7 +310,7 @@ CudaNdarrayType(float32, matrix)
In [20]:
```
```py
shared_var.set_value(np.array([[3.0, 4], [2, 1]], dtype=theano.config.floatX))
```
@@ -319,12 +319,12 @@ shared_var.set_value(np.array([[3.0, 4], [2, 1]], dtype=theano.config.floatX))
In [21]:
```
```py
print shared_var.get_value()
```
```
```py
[[ 3.  4.]
 [ 2.  1.]]
@@ -334,7 +334,7 @@ print shared_var.get_value()
In [22]:
```
```py
shared_square = shared_var ** 2
f = theano.function([], shared_square)
@@ -343,7 +343,7 @@ print f()
```
```
```py
[[  9.  16.]
 [  4.   1.]]
@@ -355,14 +355,14 @@ print f()
In [23]:
```
```py
shared_var.set_value(np.array([[1.0, 2], [3, 4]], dtype=theano.config.floatX))
print f()
```
```
```py
[[  1.   4.]
 [  9.  16.]]
@@ -372,7 +372,7 @@ print f()
In [24]:
```
```py
subtract = T.matrix('subtract')
f_update = theano.function([subtract], shared_var, updates={shared_var: shared_var - subtract})
@@ -383,7 +383,7 @@ f_update = theano.function([subtract], shared_var, updates={shared_var: shared_v
In [25]:
```
```py
print 'before update:'
print shared_var.get_value()
@@ -395,7 +395,7 @@ print shared_var.get_value()
```
```
```py
before update:
[[ 1.  2.]
 [ 3.  4.]]
@@ -415,14 +415,14 @@ after update:
In [26]:
```
```py
bar_grad = T.grad(bar, foo) # 表示 bar (x^2) 关于 foo (x) 的导数
print bar_grad.eval({foo: 10})
```
```
```py
20.0
```
@@ -431,7 +431,7 @@ print bar_grad.eval({foo: 10})
In [27]:
```
```py
y_J = theano.gradient.jacobian(y, x)
print y_J.eval({A: np.array([[9.0, 8, 7], [4, 5, 6]], dtype=theano.config.floatX), #A
@@ -440,7 +440,7 @@ print y_J.eval({A: np.array([[9.0, 8, 7], [4, 5, 6]], dtype=theano.config.floatX
```
```
```py
[[ 9.  8.  7.]
 [ 4.  5.  6.]]
@@ -456,7 +456,7 @@ Rop 用来计算 $\frac{\partial f}{\partial x}v$Lop 用来计算 $v\frac{\pa
In [28]:
```
```py
W = T.dmatrix('W')
V = T.dmatrix('V')
x = T.dvector('x')
@@ -468,7 +468,7 @@ print f([[1, 1], [1, 1]], [[2, 2], [2, 2]], [0,1])
```
```
```py
[ 2.  2.]
```