@@ -315,6 +315,8 @@ def fc(input,
        .. code-block:: python

          import paddle.fluid as fluid
+         import paddle
+         paddle.enable_static()
          # when input is single tensor
          data = fluid.data(name="data", shape=[-1, 32], dtype="float32")
          fc = fluid.layers.fc(input=data, size=1000, act="tanh")
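
The pattern repeated across this diff exists because Paddle 2.0 makes the imperative (dygraph) mode the default, so every docstring example that builds a static ``Program`` must now call ``paddle.enable_static()`` before touching the ``fluid`` graph APIs. As a sanity check, here is a minimal end-to-end sketch of the updated ``fc`` example; the executor/feed code is illustrative, not part of this diff:

.. code-block:: python

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    paddle.enable_static()  # switch back to static graph mode (2.0 defaults to dygraph)

    # build the graph exactly as the docstring does
    data = fluid.data(name="data", shape=[-1, 32], dtype="float32")
    fc = fluid.layers.fc(input=data, size=1000, act="tanh")

    # run it once to confirm the example still executes
    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    out, = exe.run(feed={"data": np.random.rand(4, 32).astype("float32")},
                   fetch_list=[fc])
    print(out.shape)  # (4, 1000)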
@@ -468,6 +470,9 @@ def embedding(input,

          import paddle.fluid as fluid
          import numpy as np
+         import paddle
+         paddle.enable_static()
+
          data = fluid.data(name='x', shape=[None, 1], dtype='int64')

          # example 1
@@ -731,6 +736,8 @@ def linear_chain_crf(input, label, param_attr=None, length=None):

            import paddle.fluid as fluid
            import numpy as np
+           import paddle
+           paddle.enable_static()

            # define net structure, using LoDTensor
            train_program = fluid.Program()
@@ -855,6 +862,8 @@ def crf_decoding(input, param_attr, label=None, length=None):
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()

            # LoDTensor-based example
            num_labels = 10
@@ -1458,6 +1467,9 @@ def conv2d(input,
        .. code-block:: python

          import paddle.fluid as fluid
+         import paddle
+         paddle.enable_static()
+
          data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
          conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
    """
@@ -1728,6 +1740,8 @@ def conv3d(input,
        .. code-block:: python

          import paddle.fluid as fluid
+         import paddle
+         paddle.enable_static()
          data = fluid.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
          conv3d = fluid.layers.conv3d(input=data, num_filters=2, filter_size=3, act="relu")
    """
@@ -2377,6 +2391,7 @@ def adaptive_pool2d(input,
          #         output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
          #
          import paddle
+         paddle.enable_static()
          data = paddle.rand(shape=[1,3,32,32])
          pool_out = paddle.fluid.layers.adaptive_pool2d(
                            input=data,
@@ -2531,6 +2546,7 @@ def adaptive_pool3d(input,
          #

          import paddle
+         paddle.enable_static()
          data = paddle.rand(shape=[1,3,32,32,32])
          pool_out = paddle.fluid.layers.adaptive_pool3d(
                            input=data,
@@ -2726,6 +2742,8 @@ def batch_norm(input,
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
            hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
            hidden2 = fluid.layers.batch_norm(input=hidden1)
@@ -2735,6 +2753,8 @@ def batch_norm(input,
            # batch_norm with momentum as Variable
            import paddle.fluid as fluid
            import paddle.fluid.layers.learning_rate_scheduler as lr_scheduler
+           import paddle
+           paddle.enable_static()

            def get_decay_momentum(momentum_init, decay_steps, decay_rate):
                global_step = lr_scheduler._decay_step_counter()
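
The momentum-as-Variable example is cut off at the hunk boundary. For context, a sketch of how such a decaying momentum variable is typically assembled in static mode; the body below is an illustration built from public ``fluid.layers`` calls, not necessarily the exact continuation of this file:

.. code-block:: python

    import paddle
    import paddle.fluid as fluid
    import paddle.fluid.layers.learning_rate_scheduler as lr_scheduler

    paddle.enable_static()

    def get_decay_momentum(momentum_init, decay_steps, decay_rate):
        global_step = lr_scheduler._decay_step_counter()
        # a persistable scalar variable that the optimizer can read as momentum
        momentum = fluid.layers.create_global_var(
            shape=[1], value=float(momentum_init), dtype='float32',
            persistable=True, name="momentum")
        # exponential decay, recomputed each step from the step counter
        decayed = momentum_init * (decay_rate ** (global_step / decay_steps))
        fluid.layers.assign(decayed, momentum)
        return momentum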
@@ -3134,6 +3154,8 @@ def instance_norm(input,
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
            hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
            hidden2 = fluid.layers.instance_norm(input=hidden1)
@@ -3269,6 +3291,7 @@ def data_norm(input,
        .. code-block:: python

            import paddle
+           paddle.enable_static()

            x = paddle.randn(shape=[32,100])
            hidden2 = paddle.static.nn.data_norm(input=x)
@@ -3451,6 +3474,8 @@ def layer_norm(input,

            import paddle.fluid as fluid
            import numpy as np
+           import paddle
+           paddle.enable_static()
            x = fluid.data(name='x', shape=[-1, 32, 32], dtype='float32')
            hidden1 = fluid.layers.layer_norm(input=x, begin_norm_axis=1)
            place = fluid.CPUPlace()
@@ -3566,6 +3591,9 @@ def group_norm(input,
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
+
            data = fluid.data(name='data', shape=[None, 8, 32, 32], dtype='float32')
            x = fluid.layers.group_norm(input=data, groups=4)
    """
@@ -3887,6 +3915,8 @@ def conv2d_transpose(input,
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
            conv2d_transpose = fluid.layers.conv2d_transpose(input=data, num_filters=2, filter_size=3)
    """
@@ -4177,6 +4207,8 @@ def conv3d_transpose(input,
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            data = fluid.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
            conv3d_transpose = fluid.layers.conv3d_transpose(input=data, num_filters=2, filter_size=3)
    """
@@ -4659,7 +4691,7 @@ def reduce_all(input, dim=None, keep_dim=False, name=None):
    This OP computes the ``logical and`` of tensor elements over the given dimension, and outputs the result.

    Args:
-        input (Variable): The input variable which is a Tensor or LoDTensor, the input data type should be `bool`.
+        input (Tensor): the input tensor, its data type should be `bool`.
        dim (list|int, optional): The dimension along which the logical and is computed.
            If :attr:`None`, compute the logical and over all elements of
            :attr:`input` and return a Tensor variable with a single element,
@@ -4672,27 +4704,28 @@ def reduce_all(input, dim=None, keep_dim=False, name=None):
            will be named automatically. The default value is None.

    Returns:
-        Variable, the output data type is bool.: The reduced tensor variable with ``logical and`` in given dims.
+        Tensor, the output data type is bool.: The reduced tensor variable with ``logical and`` in given dims.

    Examples:
        .. code-block:: python

+           import paddle
            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            import numpy as np

            # x is a bool Tensor variable with following elements:
            #    [[True, False]
            #     [True, True]]
-           x = layers.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
-           x = layers.cast(x, 'bool')
+           x = paddle.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
+           x = paddle.cast(x, 'bool')

-           out = layers.reduce_all(x)  # False
-           out = layers.reduce_all(x, dim=0)  # [True, False]
-           out = layers.reduce_all(x, dim=-1)  # [False, True]
+           out = paddle.reduce_all(x)  # False
+           out = paddle.reduce_all(x, dim=0)  # [True, False]
+           out = paddle.reduce_all(x, dim=-1)  # [False, True]
            # keep_dim=False, x.shape=(2,2), out.shape=(2,)

-           out = layers.reduce_all(x, dim=1, keep_dim=True)  # [[False], [True]]
+           out = paddle.reduce_all(x, dim=1, keep_dim=True)  # [[False], [True]]
            # keep_dim=True, x.shape=(2,2), out.shape=(2,1)

    """
@@ -4719,7 +4752,7 @@ def reduce_any(input, dim=None, keep_dim=False, name=None):
    This OP computes the ``logical or`` of tensor elements over the given dimension, and outputs the result.

    Args:
-        input (Variable): The input variable which is a Tensor or LoDTensor, the input data type should be `bool`.
+        input (Tensor): the input tensor, its data type should be `bool`.
        dim (list|int, optional): The dimension along which the logical or is computed.
            If :attr:`None`, compute the logical or over all elements of
            :attr:`input` and return a Tensor variable with a single element,
@@ -4728,30 +4761,31 @@ def reduce_any(input, dim=None, keep_dim=False, name=None):
        keep_dim (bool): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true. The default value is False.
-        name(str|None): A name for this layer (optional). If set None, the layer
+        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
-        Variable, the output data type is bool.: The reduced tensor variable with ``logical or`` in given dims.
+        Tensor, the output data type is bool.: The reduced tensor variable with ``logical or`` in given dims.

    Examples:
        .. code-block:: python

+           import paddle
            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            import numpy as np

            # x is a bool Tensor variable with following elements:
            #    [[True, False]
            #     [False, False]]
-           x = layers.assign(np.array([[1, 0], [0, 0]], dtype='int32'))
-           x = layers.cast(x, 'bool')
+           x = paddle.assign(np.array([[1, 0], [0, 0]], dtype='int32'))
+           x = paddle.cast(x, 'bool')

-           out = layers.reduce_any(x)  # True
-           out = layers.reduce_any(x, dim=0)  # [True, False]
-           out = layers.reduce_any(x, dim=-1)  # [True, False]
+           out = paddle.reduce_any(x)  # True
+           out = paddle.reduce_any(x, dim=0)  # [True, False]
+           out = paddle.reduce_any(x, dim=-1)  # [True, False]
            # keep_dim=False, x.shape=(2,2), out.shape=(2,)

-           out = layers.reduce_any(x, dim=1,
+           out = paddle.reduce_any(x, dim=1,
                                    keep_dim=True)  # [[True], [False]]
            # keep_dim=True, x.shape=(2,2), out.shape=(2,1)

@@ -5613,6 +5647,8 @@ def im2sequence(input,
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            data = fluid.data(name='data', shape=[None, 3, 32, 32],
                              dtype='float32')
            output = fluid.layers.im2sequence(
@@ -5669,6 +5705,8 @@ def row_conv(input, future_context_size, param_attr=None, act=None):
    Examples:
        >>> # for LoDTensor inputs
        >>> import paddle.fluid as fluid
+       >>> import paddle
+       >>> paddle.enable_static()
        >>> x = fluid.data(name='x', shape=[9, 16],
        >>>                dtype='float32', lod_level=1)
        >>> out = fluid.layers.row_conv(input=x, future_context_size=2)
@@ -5982,6 +6020,8 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1):
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            global_step = fluid.layers.autoincreased_step_counter(
                counter_name='@LR_DECAY_COUNTER@', begin=0, step=1)
    """
@@ -9730,6 +9770,8 @@ def prelu(x, mode, param_attr=None, name=None):
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            from paddle.fluid.param_attr import ParamAttr
            x = fluid.data(name="x", shape=[None,5,10,10], dtype="float32")
            mode = 'channel'
@@ -14307,6 +14349,9 @@ def deformable_conv(input,
            # deformable conv v2:

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
+
            C_in, H_in, W_in = 3, 32, 32
            filter_size, deformable_groups = 3, 1
            data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
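
The hunk cuts off before the ``offset`` and ``mask`` inputs that deformable conv v2 consumes. So the example is self-contained when read here, a hedged completion following the channel shapes implied by ``deformable_groups`` and ``filter_size`` (the variable names below are illustrative, not lines from this diff):

.. code-block:: python

    # offset holds (dy, dx) per sampling location; mask holds one modulation
    # scalar per location, hence the 2x difference in channel count
    offset = fluid.data(name='offset',
                        shape=[None, 2 * deformable_groups * filter_size**2, H_in, W_in],
                        dtype='float32')
    mask = fluid.data(name='mask',
                      shape=[None, deformable_groups * filter_size**2, H_in, W_in],
                      dtype='float32')
    out = fluid.layers.deformable_conv(input=data, offset=offset, mask=mask,
                                       num_filters=2, filter_size=filter_size,
                                       padding=1)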