@@ -5,28 +5,28 @@ const Google: ModelProviderCard = {
     {
       description: 'A legacy text-only model optimized for chat conversations',
       displayName: 'PaLM 2 Chat (Legacy)',
-      hidden: true,
       id: 'chat-bison-001',
       maxOutput: 1024,
     },
     {
       description: 'A legacy model that understands text and generates text as an output',
       displayName: 'PaLM 2 (Legacy)',
-      hidden: true,
       id: 'text-bison-001',
       maxOutput: 1024,
       tokens: 9220,
     },
     {
       description: 'The best model for scaling across a wide range of tasks',
       displayName: 'Gemini 1.0 Pro',
+      enabled: true,
       id: 'gemini-pro',
       maxOutput: 2048,
       tokens: 32_768,
     },
     {
       description: 'The best image understanding model to handle a broad range of applications',
       displayName: 'Gemini 1.0 Pro Vision',
+      enabled: true,
       id: 'gemini-1.0-pro-vision-latest',
       maxOutput: 4096,
       tokens: 16_384,
@@ -35,7 +35,6 @@ const Google: ModelProviderCard = {
     {
       description: 'The best image understanding model to handle a broad range of applications',
       displayName: 'Gemini 1.0 Pro Vision',
-      hidden: true,
       id: 'gemini-pro-vision',
       maxOutput: 4096,
       tokens: 16_384,
@@ -45,7 +44,6 @@ const Google: ModelProviderCard = {
       description:
         'The best model for scaling across a wide range of tasks. This is a stable model that supports tuning.',
       displayName: 'Gemini 1.0 Pro 001 (Tuning)',
-      hidden: true,
       id: 'gemini-1.0-pro-001',
       maxOutput: 2048,
       tokens: 32_768,
@@ -54,7 +52,6 @@ const Google: ModelProviderCard = {
       description:
         'The best model for scaling across a wide range of tasks. This is the latest model.',
       displayName: 'Gemini 1.0 Pro Latest',
-      hidden: true,
       id: 'gemini-1.0-pro-latest',
       maxOutput: 2048,
       tokens: 32_768,
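Note: the change appears to move these model cards from an opt-out `hidden: true` flag to an opt-in `enabled: true` flag on the models surfaced by default (Gemini 1.0 Pro and Gemini 1.0 Pro Vision). A minimal sketch of how a consumer might derive the default model list under that assumption; the `ChatModelCard` shape below is inferred from the fields visible in this diff and is illustrative, not the repository's actual type definition.

```ts
// Assumed card shape, based only on the fields visible above.
interface ChatModelCard {
  description?: string;
  displayName?: string;
  enabled?: boolean; // opt a model into the default list
  hidden?: boolean; // legacy flag: hide a model from the default list
  id: string;
  maxOutput?: number;
  tokens?: number;
}

// One possible selection rule: prefer the explicit `enabled` flag,
// falling back to "not hidden" for cards that still use the legacy flag.
const defaultModels = (models: ChatModelCard[]): ChatModelCard[] =>
  models.filter((m) => m.enabled ?? !m.hidden);
```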