This repository was archived by the owner on Aug 5, 2022. It is now read-only.

Commit c7ed327
merge release_1.0.6
2 parents: 16f8c2b + f77f7e0


53 files changed: +19621 / -5785 lines

Makefile
Lines changed: 5 additions & 0 deletions

@@ -64,6 +64,11 @@ endif
 #################### MLSL ####################

 ifeq ($(USE_MLSL), 1)
+
+ifeq ($(CPU_ONLY), 0)
+$(error Multi-node is not supported if CPU_ONLY is disabled. Please set CPU_ONLY=1 if USE_MLSL=1)
+endif
+
 RETURN_STRING=$(shell ./external/mlsl/prepare_mlsl.sh)
 MLSL_ROOT=$(firstword $(RETURN_STRING))
 MLSL_LDFLAGS=$(lastword $(RETURN_STRING))

cmake/Dependencies.cmake
Lines changed: 4 additions & 0 deletions

@@ -96,6 +96,10 @@ endif()

 # ---[ MLSL
 if(USE_MLSL)
+  if (NOT CPU_ONLY)
+    message(FATAL_ERROR "Multi-node is not supported if CPU_ONLY is disabled. Please set CPU_ONLY=1 if USE_MLSL=1.")
+  endif()
+
   #--find mlsl in external/mkl
   set(script_cmd "./external/mlsl/prepare_mlsl.sh" )
   execute_process(COMMAND ${script_cmd}

data/Celeb-A/celebA.txt
Lines changed: 10 additions & 0 deletions

@@ -0,0 +1,10 @@
+/Celeb-A_Cropped/000001.jpg 1
+/Celeb-A_Cropped/000002.jpg 1
+/Celeb-A_Cropped/000003.jpg 1
+/Celeb-A_Cropped/000004.jpg 1
+/Celeb-A_Cropped/000005.jpg 1
+/Celeb-A_Cropped/000006.jpg 1
+/Celeb-A_Cropped/000007.jpg 1
+/Celeb-A_Cropped/000008.jpg 1
+/Celeb-A_Cropped/000009.jpg 1
+/Celeb-A_Cropped/000010.jpg 1
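
Each added line pairs an image path with a dummy class label (always 1 here), the whitespace-separated "path label" layout that the cropping script below also writes out. As a small illustration only (not part of the commit; file name and usage are assumptions), such a list can be read back like this:

# Illustrative reader for a "path label" list file such as data/Celeb-A/celebA.txt.
def read_image_list(list_path):
    entries = []
    with open(list_path) as list_file:
        for line in list_file:
            line = line.strip()
            if not line:
                continue  # skip blank lines
            path, label = line.rsplit(" ", 1)  # label is the last whitespace-separated token
            entries.append((path, int(label)))
    return entries

if __name__ == "__main__":
    for path, label in read_image_list("data/Celeb-A/celebA.txt"):
        print(path, label)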

data/Celeb-A/crop_celebA.py
Lines changed: 59 additions & 0 deletions

@@ -0,0 +1,59 @@
+from PIL import Image
+import os
+import sys
+
+print ""
+print "Prepare Celeb-A Dataset! (1. Crop the images. 2. Generate a train list file.)"
+print ""
+print "-------------------------------------------------------------------------------"
+
+current_path = os.getcwd()
+celebA_path = ""
+celebA_cropped_path = ""
+print "The current path containing this python file is: " + current_path
+if len(sys.argv) == 1:
+    print "Please give the path of original Celeb-A dataset!"
+    exit(0)
+elif len(sys.argv) > 1:
+    print "The path of original Celeb-A dataset is: " + str(sys.argv[1])
+    celebA_path = sys.argv[1]
+    celebA_cropped_path = os.path.dirname(celebA_path) + os.sep + "Cropped" #To avoid crop the generated images again if this parameter is not provided
+    if len(sys.argv) > 2:
+        print "The path of cropped Celeb-A dataset will be: " + str(sys.argv[2])
+        celebA_cropped_path = sys.argv[2]
+    else:
+        print "The path of cropped Celeb-A dataset will be defult, set as: " + celebA_cropped_path
+
+if os.path.exists(celebA_cropped_path):
+    print "The path of cropped Celeb-A dataset exists."
+else:
+    print "The path of cropped Celeb-A dataset doesn't exist! I will create it now!"
+    os.makedirs(celebA_cropped_path)
+print "-------------------------------------------------------------------------------"
+
+training_list_file = os.path.join(celebA_cropped_path, "celebA.txt")
+list_file = open(training_list_file, 'w')
+total_image_num = 0
+x1, y1 = 30, 40
+cropped_box = (x1, y1, x1 + 138, y1 + 138)
+
+for parent,dirnames,filenames in os.walk(celebA_path):
+    for filename in filenames:
+        if filename.endswith(".jpg"):
+            total_image_num += 1
+            #print "parent is:" + parent
+            #print "filename is:" + filename
+            image_path_and_name = os.path.join(parent,filename)
+            print "the full name of the file is: " + image_path_and_name
+            input_image = Image.open(image_path_and_name)
+            #input_image.show()
+            cropped_image = input_image.crop(cropped_box)
+            #cropped_image.show()
+            scaled_cropped_image = cropped_image.resize((64, 64))
+            #scaled_cropped_image.show()
+            save_result_image_path_and_name = os.path.join(celebA_cropped_path,filename)
+            scaled_cropped_image.save(save_result_image_path_and_name, 'jpeg')
+            list_file.writelines(save_result_image_path_and_name)
+            list_file.writelines(" 1" + "\n") #Must add label to list file
+print "There are " + str(total_image_num) + " images are finished with cropping and scaling operations!"
+list_file.close()
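
For orientation, here is a minimal Python 3 sketch of the flow the committed Python 2 script implements: crop a 138x138 box at offset (30, 40), resize to 64x64, save the result, and append "<path> 1" to the list file. The function name and argument handling are illustrative assumptions, not part of the commit.

# Minimal Python 3 sketch of crop_celebA.py's core loop (illustrative only).
import os
import sys

from PIL import Image

def crop_celeba(src_dir, dst_dir):
    """Crop 138x138 at offset (30, 40), resize to 64x64, and write a 'path label' list file."""
    os.makedirs(dst_dir, exist_ok=True)
    box = (30, 40, 30 + 138, 40 + 138)  # same crop box as the committed script
    count = 0
    with open(os.path.join(dst_dir, "celebA.txt"), "w") as list_file:
        for parent, _, filenames in os.walk(src_dir):
            for name in filenames:
                if not name.endswith(".jpg"):
                    continue
                image = Image.open(os.path.join(parent, name))
                out_path = os.path.join(dst_dir, name)
                image.crop(box).resize((64, 64)).save(out_path, "JPEG")
                list_file.write(out_path + " 1\n")  # dummy label required by the list format
                count += 1
    return count

if __name__ == "__main__":
    # Usage: python crop_celeba.py /path/to/Celeb-A [/path/to/Cropped]
    src = sys.argv[1]
    dst = sys.argv[2] if len(sys.argv) > 2 else os.path.join(os.path.dirname(src), "Cropped")
    print("Cropped and scaled %d images into %s" % (crop_celeba(src, dst), dst))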

docker/standalone/cpu-centos/Dockerfile
Lines changed: 4 additions & 1 deletion

@@ -4,7 +4,9 @@ MAINTAINER [email protected]
 #ENV http_proxy proxy:port
 #ENV https_proxy proxy:port

-RUN rpm -iUvh http://download.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-8.noarch.rpm
+RUN rpm -iUvh http://download.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-10.noarch.rpm
+
+RUN yum upgrade -y

 RUN yum install -y \
 redhat-rpm-config \
@@ -15,6 +17,7 @@ RUN yum install -y \
 cmake \
 git \
 wget \
+ssh \
 atlas-devel \
 boost-devel \
 gflags-devel \

docker/standalone/cpu-ubuntu/Dockerfile
Lines changed: 2 additions & 1 deletion

@@ -1,4 +1,4 @@
-FROM ubuntu:14.04
+FROM ubuntu:16.04


 #ENV http_proxy proxy:port
@@ -9,6 +9,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
 cmake \
 git \
 wget \
+ssh \
 libboost-all-dev \
 libgflags-dev \
 libgoogle-glog-dev \

docker/templates/Dockerfile.template
Lines changed: 1 addition & 0 deletions

@@ -8,6 +8,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
 cmake \
 git \
 wget \
+ssh \
 libatlas-base-dev \
 libboost-all-dev \
 libgflags-dev \

include/caffe/layers/mkldnn_layers.hpp
Lines changed: 11 additions & 11 deletions

@@ -69,7 +69,7 @@ class MKLDNNBatchNormLayer : public MKLDNNLayer<Dtype>, public Layer<Dtype> {
 , bwd_top_diff(), bwd_bottom_diff()
 , BatchNormFwd_pd(), BatchNormBwd_pd()
 , scaleshift_memory(), bwd_scaleshift_diff_memory()
-, output_memory(), bwd_bottom_diff_memory(), inplace_buffer_memory()
+, output_memory(), bwd_bottom_diff_memory()
 , input_primitive(), bwd_top_diff_primitive()
 {
 PERFORMANCE_EVENT_ID_RESET(perf_id_fw_);
@@ -95,12 +95,10 @@ class MKLDNNBatchNormLayer : public MKLDNNLayer<Dtype>, public Layer<Dtype> {
 void InitBatchNormBwd(const vector<Blob<Dtype>*>& top,
 const vector<bool>& propagate_down,
 const vector<Blob<Dtype>*>& bottom);
-void InitBatchNormFwdPrimitive(int stats_batch_idx, bool inplace);
-void InitBatchNormBwdPrimitive(int stats_batch_idx, bool inplace);
+void InitBatchNormFwdPrimitive(int stats_batch_idx);
+void InitBatchNormBwdPrimitive(int stats_batch_idx);
 template <bool diff> shared_ptr<memory> GetStatsBatchMemory(
 shared_ptr<MKLDNNMemoryDescriptor<Dtype, diff> > mkldnn_data, int idx);
-template <bool diff> shared_ptr<memory> GetStatsBatchMemoryInplace(
-shared_ptr<MKLDNNMemoryDescriptor<Dtype, diff> > mkldnn_data, int idx, shared_ptr<memory > buffer_memory);
 void InitStatsBatchVars(int batch_size);
 shared_ptr<MKLDNNData<Dtype> > fwd_top_data, fwd_bottom_data;
 shared_ptr<MKLDNNDiff<Dtype> > bwd_top_diff, bwd_bottom_diff;
@@ -112,8 +110,8 @@ class MKLDNNBatchNormLayer : public MKLDNNLayer<Dtype>, public Layer<Dtype> {

 shared_ptr<memory> scaleshift_memory, bwd_scaleshift_diff_memory;
 shared_ptr<memory> output_memory, bwd_bottom_diff_memory;
-shared_ptr<memory> inplace_buffer_memory;
-vector<shared_ptr<memory> > input_stats, output_stats, top_diff_stats, bottom_diff_stats, input_inplace_buffer;
+
+vector<shared_ptr<memory> > input_stats, output_stats, top_diff_stats, bottom_diff_stats;

 shared_ptr<primitive> input_primitive, bwd_top_diff_primitive;

@@ -124,6 +122,7 @@ class MKLDNNBatchNormLayer : public MKLDNNLayer<Dtype>, public Layer<Dtype> {
 int stats_batch_size_;
 shared_ptr<Blob<Dtype> > scaleshift_blob_;
 shared_ptr<Blob<Dtype> > scaleshift_acc_;
+Blob<Dtype> inplace_buffer;

 PERFORMANCE_EVENT_ID_DECL(perf_id_fw_);
 PERFORMANCE_EVENT_ID_DECL(perf_id_bw_);
@@ -224,7 +223,7 @@ class MKLDNNInnerProductLayer : public MKLDNNLayer<Dtype> , public InnerProductL
 , bwdd_top_diff_primitive, bwdd_weights_data_primitive
 , bwdw_top_diff_primitive, bwdw_bottom_data_primitive;
 int32_t w_, h_;
-
+
 /* In case of (iter_size > 1) we need additional buffers */
 shared_ptr<MKLDNNDiff<Dtype> > bwdw_weights_diff_iter, bwdw_bias_diff_iter;
 shared_ptr<memory> bwdw_weights_diff_memory_iter, bwdw_bias_diff_memory_iter;
@@ -322,13 +321,14 @@ class MKLDNNPoolingLayer : public MKLDNNLayer<Dtype>, public Layer<Dtype> {
 ,const vector<Blob<Dtype>*>& bottom);
 virtual void Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down
 ,const vector<Blob<Dtype>*>& bottom);
+virtual void compute_output_shape(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top);

 private:
 void InitPoolingFwd(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top);
 void InitPoolingBwd(const vector<Blob<Dtype>*>& bottom
 , const vector<bool>& propagate_down
 , const vector<Blob<Dtype>*>& top);
-
+
 shared_ptr<MKLDNNData<Dtype>> fwd_bottom_data, fwd_top_data;
 shared_ptr<MKLDNNDiff<Dtype>> bwd_top_diff, bwd_bottom_diff;
 shared_ptr<pooling_forward::primitive_desc> poolingFwd_pd;
@@ -408,7 +408,7 @@ class MKLDNNConcatLayer : public MKLDNNLayer<Dtype> , public Layer<Dtype> {
 : MKLDNNLayer<Dtype>(), Layer<Dtype>(param),
 concatFwd_pd(), fwd_output_memory(),
 bwd_reorder_input_memory(), bwd_reorder_output_memory(),
-fwd_top_data(), fwd_bottom_data(), split_channels() {
+fwd_top_data(), fwd_bottom_data(), split_dims() {
 PERFORMANCE_EVENT_ID_RESET(perf_id_fw_);
 PERFORMANCE_EVENT_ID_RESET(perf_id_bw_);
 }
@@ -440,7 +440,7 @@ class MKLDNNConcatLayer : public MKLDNNLayer<Dtype> , public Layer<Dtype> {
 shared_ptr<MKLDNNDiff<Dtype> > bwd_top_diff;
 vector<shared_ptr<MKLDNNDiff<Dtype> > > bwd_bottom_diff;
 vector<MKLDNNPrimitive<Dtype> > reorders;
-vector<int> split_channels;
+vector<int> split_dims;

 int32_t num_, width_, height_, channels_, num_concats_;
 int concat_dimension;

include/caffe/mkldnn_base.hpp
Lines changed: 2 additions & 0 deletions

@@ -196,6 +196,8 @@ class MKLDNNLayer {
 public:
 explicit MKLDNNLayer() {}
 virtual ~MKLDNNLayer() {}
+protected:
+bool reshape;
 };

 // ===== MKLDNNPrimitive =======================================

include/caffe/net.hpp
Lines changed: 3 additions & 0 deletions

@@ -326,6 +326,9 @@ class Net {
 /// @brief return whether NetState state meets NetStateRule rule
 static bool StateMeetsRule(const NetState& state, const NetStateRule& rule,
 const string& layer_name);
+inline const map<string,int>& blob_names_index() const {
+  return blob_names_index_;
+}

 protected:
 // Helpers for Init.
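
The new accessor exposes the net's internal blob-name-to-index map read-only, so callers can resolve a blob's position by name instead of scanning blob_names() linearly. A toy Python illustration of that lookup pattern (names invented for the example, not taken from the commit):

# Toy illustration of a name -> index map guarding access to a parallel list,
# mirroring what Net::blob_names_index() exposes on the C++ side.
blob_names = ["data", "conv1", "pool1", "fc1", "prob"]  # hypothetical blob names
blob_names_index = {name: i for i, name in enumerate(blob_names)}

def blob_index_by_name(name):
    """Return the blob's index, or None if no blob is registered under that name."""
    return blob_names_index.get(name)

print(blob_index_by_name("pool1"))    # -> 2
print(blob_index_by_name("missing"))  # -> None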
