
Commit 8ade000

gguusschingor13 authored and committed
samples: Cloud Client Vision How-to snippets (#485)
* Add initial cloud client Vision API snippets.
* Add Vision API quickstart. (#486)
1 parent 6535771 commit 8ade000
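The sample is driven from the command line: Detect takes one of the commands all-local, faces, labels, landmarks, logos, text, safe-search, or properties plus an image path, as the usage string in the code below shows. As a rough illustration only (a hypothetical invocation that assumes the compiled classes and the google-cloud-vision client library are already on the classpath; the build configuration is not part of this diff):

java com.example.vision.Detect labels ./resources/wakeupcat.jpg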

File tree

8 files changed (+614, -0 lines changed)
vision/snippets/resources/logos.png (8.06 KB)

vision/snippets/resources/text.jpg (122 KB)
@@ -0,0 +1,390 @@
/**
 * Copyright 2017, Google, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.example.vision;

import com.google.cloud.vision.spi.v1.ImageAnnotatorClient;
import com.google.cloud.vision.v1.AnnotateImageRequest;
import com.google.cloud.vision.v1.AnnotateImageResponse;
import com.google.cloud.vision.v1.BatchAnnotateImagesResponse;
import com.google.cloud.vision.v1.ColorInfo;
import com.google.cloud.vision.v1.DominantColorsAnnotation;
import com.google.cloud.vision.v1.EntityAnnotation;
import com.google.cloud.vision.v1.FaceAnnotation;
import com.google.cloud.vision.v1.Feature;
import com.google.cloud.vision.v1.Feature.Type;
import com.google.cloud.vision.v1.Image;
import com.google.cloud.vision.v1.LocationInfo;
import com.google.cloud.vision.v1.SafeSearchAnnotation;
import com.google.protobuf.ByteString;

import java.io.FileInputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.List;

public class Detect {

  /**
   * Detects faces, labels, landmarks, logos, text, image properties, and safe-search
   * likelihoods in an image using the Cloud Vision API.
   * @throws IOException on Input/Output errors.
   */
  public static void main(String[] args) throws IOException {
    argsHelper(args, System.out);
  }

  /**
   * Helper that handles the input passed to the program.
   * @throws IOException on Input/Output errors.
   */
  public static void argsHelper(String[] args, PrintStream out) throws IOException {
    if (args.length < 1) {
      out.println("Usage:");
      out.printf(
          "\tjava %s \"<command>\" \"<path-to-image>\"\n"
              + "Commands:\n"
              + "\tall-local | faces | labels | landmarks | logos | text | safe-search | properties\n"
              + "Path:\n\tA file path (ex: ./resources/wakeupcat.jpg) or a URI for a Cloud Storage "
              + "resource (gs://...)\n",
          Detect.class.getCanonicalName());
      return;
    }
    String command = args[0];
    String path = args.length > 1 ? args[1] : "";

    Detect app = new Detect(ImageAnnotatorClient.create());
    if (command.equals("all-local")) {
      detectFaces("resources/face_no_surprise.jpg", out);
      detectLabels("resources/wakeupcat.jpg", out);
      detectLandmarks("resources/landmark.jpg", out);
      detectLogos("resources/logos.png", out);
      detectText("resources/text.jpg", out);
      detectProperties("resources/landmark.jpg", out);
      detectSafeSearch("resources/wakeupcat.jpg", out);
    } else if (command.equals("faces")) {
      if (path.startsWith("gs://")) {
        // TODO: See https://goo.gl/uWgYhQ
      } else {
        detectFaces(path, out);
      }
    } else if (command.equals("labels")) {
      if (path.startsWith("gs://")) {
        // TODO: See https://goo.gl/uWgYhQ
      } else {
        detectLabels(path, out);
      }
    } else if (command.equals("landmarks")) {
      if (path.startsWith("gs://")) {
        // TODO: See https://goo.gl/uWgYhQ
      } else {
        detectLandmarks(path, out);
      }
    } else if (command.equals("logos")) {
      if (path.startsWith("gs://")) {
        // TODO: See https://goo.gl/uWgYhQ
      } else {
        detectLogos(path, out);
      }
    } else if (command.equals("text")) {
      if (path.startsWith("gs://")) {
        // TODO: See https://goo.gl/uWgYhQ
      } else {
        detectText(path, out);
      }
    } else if (command.equals("properties")) {
      if (path.startsWith("gs://")) {
        // TODO: See https://goo.gl/uWgYhQ
      } else {
        detectProperties(path, out);
      }
    } else if (command.equals("safe-search")) {
      if (path.startsWith("gs://")) {
        // TODO: See https://goo.gl/uWgYhQ
      } else {
        detectSafeSearch(path, out);
      }
    }
  }

  private static ImageAnnotatorClient visionApi;

  /**
   * Constructs a {@link Detect} which connects to the Cloud Vision API.
   */
  public Detect(ImageAnnotatorClient client) {
    visionApi = client;
  }

  /**
   * Detects faces in the specified image.
   * @param filePath The path to the file to perform face detection on.
   * @param out A {@link PrintStream} to write detected features to.
   * @throws IOException on Input/Output errors.
   */
  public static void detectFaces(String filePath, PrintStream out) throws IOException {
    List<AnnotateImageRequest> requests = new ArrayList<>();

    ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));

    Image img = Image.newBuilder().setContent(imgBytes).build();
    Feature feat = Feature.newBuilder().setType(Type.FACE_DETECTION).build();
    AnnotateImageRequest request = AnnotateImageRequest.newBuilder()
        .addFeatures(feat)
        .setImage(img)
        .build();
    requests.add(request);

    BatchAnnotateImagesResponse response = visionApi.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        out.printf("Error: %s\n", res.getError().getMessage());
        return;
      }

      // For full list of available annotations, see http://g.co/cloud/vision/docs
      for (FaceAnnotation annotation : res.getFaceAnnotationsList()) {
        out.printf("anger: %s\njoy: %s\nsurprise: %s\nposition: %s",
            annotation.getAngerLikelihood(),
            annotation.getJoyLikelihood(),
            annotation.getSurpriseLikelihood(),
            annotation.getBoundingPoly());
      }
    }
  }

  /**
   * Detects labels in the specified image.
   * @param filePath The path to the file to perform label detection on.
   * @param out A {@link PrintStream} to write detected labels to.
   * @throws IOException on Input/Output errors.
   */
  public static void detectLabels(String filePath, PrintStream out) throws IOException {
    List<AnnotateImageRequest> requests = new ArrayList<>();

    ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));

    Image img = Image.newBuilder().setContent(imgBytes).build();
    Feature feat = Feature.newBuilder().setType(Type.LABEL_DETECTION).build();
    AnnotateImageRequest request = AnnotateImageRequest.newBuilder()
        .addFeatures(feat)
        .setImage(img)
        .build();
    requests.add(request);

    BatchAnnotateImagesResponse response = visionApi.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        out.printf("Error: %s\n", res.getError().getMessage());
        return;
      }

      // For full list of available annotations, see http://g.co/cloud/vision/docs
      for (EntityAnnotation annotation : res.getLabelAnnotationsList()) {
        annotation.getAllFields().forEach((k, v) -> out.printf("%s : %s\n", k, v.toString()));
      }
    }
  }

  /**
   * Detects landmarks in the specified image.
   * @param filePath The path to the file to perform landmark detection on.
   * @param out A {@link PrintStream} to write detected landmarks to.
   * @throws IOException on Input/Output errors.
   */
  public static void detectLandmarks(String filePath, PrintStream out) throws IOException {
    List<AnnotateImageRequest> requests = new ArrayList<>();
    ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));

    Image img = Image.newBuilder().setContent(imgBytes).build();
    Feature feat = Feature.newBuilder().setType(Type.LANDMARK_DETECTION).build();
    AnnotateImageRequest request = AnnotateImageRequest.newBuilder()
        .addFeatures(feat)
        .setImage(img)
        .build();
    requests.add(request);

    BatchAnnotateImagesResponse response = visionApi.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        out.printf("Error: %s\n", res.getError().getMessage());
        return;
      }

      // For full list of available annotations, see http://g.co/cloud/vision/docs
      for (EntityAnnotation annotation : res.getLandmarkAnnotationsList()) {
        LocationInfo info = annotation.getLocationsList().listIterator().next();
        out.printf("Landmark: %s\n %s\n", annotation.getDescription(), info.getLatLng());
      }
    }
  }

  /**
   * Detects logos in the specified image.
   * @param filePath The path to the file to perform logo detection on.
   * @param out A {@link PrintStream} to write detected logos to.
   * @throws IOException on Input/Output errors.
   */
  public static void detectLogos(String filePath, PrintStream out) throws IOException {
    List<AnnotateImageRequest> requests = new ArrayList<>();

    ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));

    Image img = Image.newBuilder().setContent(imgBytes).build();
    Feature feat = Feature.newBuilder().setType(Type.LOGO_DETECTION).build();
    AnnotateImageRequest request = AnnotateImageRequest.newBuilder()
        .addFeatures(feat)
        .setImage(img)
        .build();
    requests.add(request);

    BatchAnnotateImagesResponse response = visionApi.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        out.printf("Error: %s\n", res.getError().getMessage());
        return;
      }

      // For full list of available annotations, see http://g.co/cloud/vision/docs
      for (EntityAnnotation annotation : res.getLogoAnnotationsList()) {
        out.println(annotation.getDescription());
      }
    }
  }

  /**
   * Detects text in the specified image.
   * @param filePath The path to the file to detect text in.
   * @param out A {@link PrintStream} to write the detected text to.
   * @throws IOException on Input/Output errors.
   */
  public static void detectText(String filePath, PrintStream out) throws IOException {
    List<AnnotateImageRequest> requests = new ArrayList<>();

    ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));

    Image img = Image.newBuilder().setContent(imgBytes).build();
    Feature feat = Feature.newBuilder().setType(Type.TEXT_DETECTION).build();
    AnnotateImageRequest request = AnnotateImageRequest.newBuilder()
        .addFeatures(feat)
        .setImage(img)
        .build();
    requests.add(request);

    BatchAnnotateImagesResponse response = visionApi.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        out.printf("Error: %s\n", res.getError().getMessage());
        return;
      }

      // For full list of available annotations, see http://g.co/cloud/vision/docs
      for (EntityAnnotation annotation : res.getTextAnnotationsList()) {
        out.printf("Text: %s\n", annotation.getDescription());
        out.printf("Position : %s\n", annotation.getBoundingPoly());
      }
    }
  }

  /**
   * Detects image properties such as color frequency from the specified image.
   * @param filePath The path to the file to detect properties of.
   * @param out A {@link PrintStream} to write the detected properties to.
   * @throws IOException on Input/Output errors.
   */
  public static void detectProperties(String filePath, PrintStream out) throws IOException {
    List<AnnotateImageRequest> requests = new ArrayList<>();

    ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));

    Image img = Image.newBuilder().setContent(imgBytes).build();
    Feature feat = Feature.newBuilder().setType(Type.IMAGE_PROPERTIES).build();
    AnnotateImageRequest request = AnnotateImageRequest.newBuilder()
        .addFeatures(feat)
        .setImage(img)
        .build();
    requests.add(request);

    BatchAnnotateImagesResponse response = visionApi.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        out.printf("Error: %s\n", res.getError().getMessage());
        return;
      }

      // For full list of available annotations, see http://g.co/cloud/vision/docs
      DominantColorsAnnotation colors = res.getImagePropertiesAnnotation().getDominantColors();
      for (ColorInfo color : colors.getColorsList()) {
        out.printf("fraction: %f\nr: %f, g: %f, b: %f\n",
            color.getPixelFraction(),
            color.getColor().getRed(),
            color.getColor().getGreen(),
            color.getColor().getBlue());
      }
    }
  }

  /**
   * Detects whether the specified image has features you would want to moderate.
   * @param filePath The path to the file used for safe search detection.
   * @param out A {@link PrintStream} to write the results to.
   * @throws IOException on Input/Output errors.
   */
  public static void detectSafeSearch(String filePath, PrintStream out) throws IOException {
    List<AnnotateImageRequest> requests = new ArrayList<>();

    ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));

    Image img = Image.newBuilder().setContent(imgBytes).build();
    Feature feat = Feature.newBuilder().setType(Type.SAFE_SEARCH_DETECTION).build();
    AnnotateImageRequest request = AnnotateImageRequest.newBuilder()
        .addFeatures(feat)
        .setImage(img)
        .build();
    requests.add(request);

    BatchAnnotateImagesResponse response = visionApi.batchAnnotateImages(requests);
    List<AnnotateImageResponse> responses = response.getResponsesList();

    for (AnnotateImageResponse res : responses) {
      if (res.hasError()) {
        out.printf("Error: %s\n", res.getError().getMessage());
        return;
      }

      // For full list of available annotations, see http://g.co/cloud/vision/docs
      SafeSearchAnnotation annotation = res.getSafeSearchAnnotation();
      out.printf("adult: %s\nmedical: %s\nspoofed: %s\nviolence: %s\n",
          annotation.getAdult(),
          annotation.getMedical(),
          annotation.getSpoof(),
          annotation.getViolence());
    }
  }
}
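Each gs:// branch in argsHelper is left as a TODO pointing at https://goo.gl/uWgYhQ. As a hedged illustration only (not part of this commit), a Cloud Storage variant of label detection could swap the inlined ByteString content for an ImageSource carrying the GCS URI; the hypothetical helper below assumes an additional import of com.google.cloud.vision.v1.ImageSource and reuses the visionApi client from the committed class:

  // Hypothetical sketch, not part of the committed file: label detection for a gs:// image.
  public static void detectLabelsGcs(String gcsPath, PrintStream out) {
    List<AnnotateImageRequest> requests = new ArrayList<>();

    // Point the request at a Cloud Storage object (gs://bucket/object) instead of local bytes.
    ImageSource imgSource = ImageSource.newBuilder().setGcsImageUri(gcsPath).build();
    Image img = Image.newBuilder().setSource(imgSource).build();
    Feature feat = Feature.newBuilder().setType(Type.LABEL_DETECTION).build();
    requests.add(AnnotateImageRequest.newBuilder()
        .addFeatures(feat)
        .setImage(img)
        .build());

    BatchAnnotateImagesResponse response = visionApi.batchAnnotateImages(requests);
    for (AnnotateImageResponse res : response.getResponsesList()) {
      if (res.hasError()) {
        out.printf("Error: %s\n", res.getError().getMessage());
        return;
      }
      // Print a simplified view: label description and confidence score.
      for (EntityAnnotation annotation : res.getLabelAnnotationsList()) {
        out.printf("%s : %f\n", annotation.getDescription(), annotation.getScore());
      }
    }
  }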
