Skip to content

Commit 03b1972

Browse files
committed
chore: fix formatting
1 parent e2601a0 commit 03b1972

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

49 files changed

+778
-1069
lines changed

apps/web/app/api/generate/route.ts

+56-62
Original file line numberDiff line numberDiff line change
@@ -1,141 +1,135 @@
1-
import OpenAI from "openai";
2-
import { OpenAIStream, StreamingTextResponse } from "ai";
3-
import { kv } from "@vercel/kv";
4-
import { Ratelimit } from "@upstash/ratelimit";
5-
import { match } from "ts-pattern";
6-
import type { ChatCompletionMessageParam } from "openai/resources/index.mjs";
1+
import OpenAI from 'openai';
2+
import { OpenAIStream, StreamingTextResponse } from 'ai';
3+
import { kv } from '@vercel/kv';
4+
import { Ratelimit } from '@upstash/ratelimit';
5+
import { match } from 'ts-pattern';
6+
import type { ChatCompletionMessageParam } from 'openai/resources/index.mjs';
77

88
// Create an OpenAI API client (that's edge friendly!)
99
// Using LLamma's OpenAI client:
1010

1111
// IMPORTANT! Set the runtime to edge: https://vercel.com/docs/functions/edge-functions/edge-runtime
12-
export const runtime = "edge";
12+
export const runtime = 'edge';
1313

1414
const llama = new OpenAI({
15-
apiKey: "ollama",
16-
baseURL: "http://localhost:11434/v1",
15+
apiKey: 'ollama',
16+
baseURL: 'http://localhost:11434/v1',
1717
});
1818

1919
export async function POST(req: Request): Promise<Response> {
2020
const openai = new OpenAI({
2121
apiKey: process.env.OPENAI_API_KEY,
22-
baseURL: process.env.OPENAI_BASE_URL || "https://api.openai.com/v1",
22+
baseURL: process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1',
2323
});
2424
// Check if the OPENAI_API_KEY is set, if not return 400
25-
if (!process.env.OPENAI_API_KEY || process.env.OPENAI_API_KEY === "") {
26-
return new Response(
27-
"Missing OPENAI_API_KEY - make sure to add it to your .env file.",
28-
{
29-
status: 400,
30-
},
31-
);
25+
if (!process.env.OPENAI_API_KEY || process.env.OPENAI_API_KEY === '') {
26+
return new Response('Missing OPENAI_API_KEY - make sure to add it to your .env file.', {
27+
status: 400,
28+
});
3229
}
3330
if (process.env.KV_REST_API_URL && process.env.KV_REST_API_TOKEN) {
34-
const ip = req.headers.get("x-forwarded-for");
31+
const ip = req.headers.get('x-forwarded-for');
3532
const ratelimit = new Ratelimit({
3633
redis: kv,
37-
limiter: Ratelimit.slidingWindow(50, "1 d"),
34+
limiter: Ratelimit.slidingWindow(50, '1 d'),
3835
});
3936

40-
const { success, limit, reset, remaining } = await ratelimit.limit(
41-
`novel_ratelimit_${ip}`,
42-
);
37+
const { success, limit, reset, remaining } = await ratelimit.limit(`novel_ratelimit_${ip}`);
4338

4439
if (!success) {
45-
return new Response("You have reached your request limit for the day.", {
40+
return new Response('You have reached your request limit for the day.', {
4641
status: 429,
4742
headers: {
48-
"X-RateLimit-Limit": limit.toString(),
49-
"X-RateLimit-Remaining": remaining.toString(),
50-
"X-RateLimit-Reset": reset.toString(),
43+
'X-RateLimit-Limit': limit.toString(),
44+
'X-RateLimit-Remaining': remaining.toString(),
45+
'X-RateLimit-Reset': reset.toString(),
5146
},
5247
});
5348
}
5449
}
5550

5651
const { prompt, option, command } = await req.json();
5752
const messages = match(option)
58-
.with("continue", () => [
53+
.with('continue', () => [
5954
{
60-
role: "system",
55+
role: 'system',
6156
content:
62-
"You are an AI writing assistant that continues existing text based on context from prior text. " +
63-
"Give more weight/priority to the later characters than the beginning ones. " +
64-
"Limit your response to no more than 200 characters, but make sure to construct complete sentences." +
65-
"Use Markdown formatting when appropriate.",
57+
'You are an AI writing assistant that continues existing text based on context from prior text. ' +
58+
'Give more weight/priority to the later characters than the beginning ones. ' +
59+
'Limit your response to no more than 200 characters, but make sure to construct complete sentences.' +
60+
'Use Markdown formatting when appropriate.',
6661
},
6762
{
68-
role: "user",
63+
role: 'user',
6964
content: prompt,
7065
},
7166
])
72-
.with("improve", () => [
67+
.with('improve', () => [
7368
{
74-
role: "system",
69+
role: 'system',
7570
content:
76-
"You are an AI writing assistant that improves existing text. " +
77-
"Limit your response to no more than 200 characters, but make sure to construct complete sentences." +
78-
"Use Markdown formatting when appropriate.",
71+
'You are an AI writing assistant that improves existing text. ' +
72+
'Limit your response to no more than 200 characters, but make sure to construct complete sentences.' +
73+
'Use Markdown formatting when appropriate.',
7974
},
8075
{
81-
role: "user",
76+
role: 'user',
8277
content: `The existing text is: ${prompt}`,
8378
},
8479
])
85-
.with("shorter", () => [
80+
.with('shorter', () => [
8681
{
87-
role: "system",
82+
role: 'system',
8883
content:
89-
"You are an AI writing assistant that shortens existing text. " +
90-
"Use Markdown formatting when appropriate.",
84+
'You are an AI writing assistant that shortens existing text. ' + 'Use Markdown formatting when appropriate.',
9185
},
9286
{
93-
role: "user",
87+
role: 'user',
9488
content: `The existing text is: ${prompt}`,
9589
},
9690
])
97-
.with("longer", () => [
91+
.with('longer', () => [
9892
{
99-
role: "system",
93+
role: 'system',
10094
content:
101-
"You are an AI writing assistant that lengthens existing text. " +
102-
"Use Markdown formatting when appropriate.",
95+
'You are an AI writing assistant that lengthens existing text. ' +
96+
'Use Markdown formatting when appropriate.',
10397
},
10498
{
105-
role: "user",
99+
role: 'user',
106100
content: `The existing text is: ${prompt}`,
107101
},
108102
])
109-
.with("fix", () => [
103+
.with('fix', () => [
110104
{
111-
role: "system",
105+
role: 'system',
112106
content:
113-
"You are an AI writing assistant that fixes grammar and spelling errors in existing text. " +
114-
"Limit your response to no more than 200 characters, but make sure to construct complete sentences." +
115-
"Use Markdown formatting when appropriate.",
107+
'You are an AI writing assistant that fixes grammar and spelling errors in existing text. ' +
108+
'Limit your response to no more than 200 characters, but make sure to construct complete sentences.' +
109+
'Use Markdown formatting when appropriate.',
116110
},
117111
{
118-
role: "user",
112+
role: 'user',
119113
content: `The existing text is: ${prompt}`,
120114
},
121115
])
122-
.with("zap", () => [
116+
.with('zap', () => [
123117
{
124-
role: "system",
118+
role: 'system',
125119
content:
126-
"You area an AI writing assistant that generates text based on a prompt. " +
127-
"You take an input from the user and a command for manipulating the text" +
128-
"Use Markdown formatting when appropriate.",
120+
'You area an AI writing assistant that generates text based on a prompt. ' +
121+
'You take an input from the user and a command for manipulating the text' +
122+
'Use Markdown formatting when appropriate.',
129123
},
130124
{
131-
role: "user",
125+
role: 'user',
132126
content: `For this text: ${prompt}. You have to respect the command: ${command}`,
133127
},
134128
])
135129
.run() as ChatCompletionMessageParam[];
136130

137131
const response = await openai.chat.completions.create({
138-
model: "gpt-3.5-turbo",
132+
model: 'gpt-3.5-turbo',
139133
stream: true,
140134
messages,
141135
temperature: 0.7,

apps/web/app/api/upload/route.ts

+12-17
Original file line numberDiff line numberDiff line change
@@ -1,30 +1,25 @@
1-
import { put } from "@vercel/blob";
2-
import { NextResponse } from "next/server";
1+
import { put } from '@vercel/blob';
2+
import { NextResponse } from 'next/server';
33

4-
export const runtime = "edge";
4+
export const runtime = 'edge';
55

66
export async function POST(req: Request) {
77
if (!process.env.BLOB_READ_WRITE_TOKEN) {
8-
return new Response(
9-
"Missing BLOB_READ_WRITE_TOKEN. Don't forget to add that to your .env file.",
10-
{
11-
status: 401,
12-
},
13-
);
8+
return new Response("Missing BLOB_READ_WRITE_TOKEN. Don't forget to add that to your .env file.", {
9+
status: 401,
10+
});
1411
}
1512

16-
const file = req.body || "";
17-
const filename = req.headers.get("x-vercel-filename") || "file.txt";
18-
const contentType = req.headers.get("content-type") || "text/plain";
19-
const fileType = `.${contentType.split("/")[1]}`;
13+
const file = req.body || '';
14+
const filename = req.headers.get('x-vercel-filename') || 'file.txt';
15+
const contentType = req.headers.get('content-type') || 'text/plain';
16+
const fileType = `.${contentType.split('/')[1]}`;
2017

2118
// construct final filename based on content-type if not provided
22-
const finalName = filename.includes(fileType)
23-
? filename
24-
: `${filename}${fileType}`;
19+
const finalName = filename.includes(fileType) ? filename : `${filename}${fileType}`;
2520
const blob = await put(finalName, file, {
2621
contentType,
27-
access: "public",
22+
access: 'public',
2823
});
2924

3025
return NextResponse.json(blob);

apps/web/app/layout.tsx

+11-12
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,13 @@
1-
import "@/styles/globals.css";
2-
import "@/styles/prosemirror.css";
1+
import '@/styles/globals.css';
2+
import '@/styles/prosemirror.css';
33

4-
import type { Metadata, Viewport } from "next";
5-
import type { ReactNode } from "react";
6-
import Providers from "./providers";
4+
import type { Metadata, Viewport } from 'next';
5+
import type { ReactNode } from 'react';
6+
import Providers from './providers';
77

8-
const title =
9-
"Novel - Notion-style WYSIWYG editor with AI-powered autocompletions";
8+
const title = 'Novel - Notion-style WYSIWYG editor with AI-powered autocompletions';
109
const description =
11-
"Novel is a Notion-style WYSIWYG editor with AI-powered autocompletions. Built with Tiptap, OpenAI, and Vercel AI SDK.";
10+
'Novel is a Notion-style WYSIWYG editor with AI-powered autocompletions. Built with Tiptap, OpenAI, and Vercel AI SDK.';
1211

1312
export const metadata: Metadata = {
1413
title,
@@ -20,14 +19,14 @@ export const metadata: Metadata = {
2019
twitter: {
2120
title,
2221
description,
23-
card: "summary_large_image",
24-
creator: "@steventey",
22+
card: 'summary_large_image',
23+
creator: '@steventey',
2524
},
26-
metadataBase: new URL("https://novel.sh"),
25+
metadataBase: new URL('https://novel.sh'),
2726
};
2827

2928
export const viewport: Viewport = {
30-
themeColor: "#ffffff",
29+
themeColor: '#ffffff',
3130
};
3231

3332
export default function RootLayout({ children }: { children: ReactNode }) {

apps/web/app/page.tsx

+8-12
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,11 @@
1-
import { Github } from "@/components/tailwind/ui/icons";
2-
import { Button } from "@/components/tailwind/ui/button";
3-
import Menu from "@/components/tailwind/ui/menu";
4-
import Link from "next/link";
5-
import TailwindAdvancedEditor from "@/components/tailwind/advanced-editor";
6-
import {
7-
Dialog,
8-
DialogContent,
9-
DialogTrigger,
10-
} from "@/components/tailwind/ui/dialog";
11-
import { ScrollArea } from "@/components/tailwind/ui/scroll-area";
12-
import { BookOpen } from "lucide-react";
1+
import { Github } from '@/components/tailwind/ui/icons';
2+
import { Button } from '@/components/tailwind/ui/button';
3+
import Menu from '@/components/tailwind/ui/menu';
4+
import Link from 'next/link';
5+
import TailwindAdvancedEditor from '@/components/tailwind/advanced-editor';
6+
import { Dialog, DialogContent, DialogTrigger } from '@/components/tailwind/ui/dialog';
7+
import { ScrollArea } from '@/components/tailwind/ui/scroll-area';
8+
import { BookOpen } from 'lucide-react';
139

1410
export default function Page() {
1511
return (

apps/web/app/providers.tsx

+10-20
Original file line numberDiff line numberDiff line change
@@ -1,41 +1,31 @@
1-
"use client";
1+
'use client';
22

3-
import {
4-
type Dispatch,
5-
type ReactNode,
6-
type SetStateAction,
7-
createContext,
8-
} from "react";
9-
import { ThemeProvider, useTheme } from "next-themes";
10-
import { Toaster } from "sonner";
11-
import { Analytics } from "@vercel/analytics/react";
12-
import useLocalStorage from "@/hooks/use-local-storage";
3+
import { type Dispatch, type ReactNode, type SetStateAction, createContext } from 'react';
4+
import { ThemeProvider, useTheme } from 'next-themes';
5+
import { Toaster } from 'sonner';
6+
import { Analytics } from '@vercel/analytics/react';
7+
import useLocalStorage from '@/hooks/use-local-storage';
138

149
export const AppContext = createContext<{
1510
font: string;
1611
setFont: Dispatch<SetStateAction<string>>;
1712
}>({
18-
font: "Default",
13+
font: 'Default',
1914
setFont: () => {},
2015
});
2116

2217
const ToasterProvider = () => {
2318
const { theme } = useTheme() as {
24-
theme: "light" | "dark" | "system";
19+
theme: 'light' | 'dark' | 'system';
2520
};
2621
return <Toaster theme={theme} />;
2722
};
2823

2924
export default function Providers({ children }: { children: ReactNode }) {
30-
const [font, setFont] = useLocalStorage<string>("novel__font", "Default");
25+
const [font, setFont] = useLocalStorage<string>('novel__font', 'Default');
3126

3227
return (
33-
<ThemeProvider
34-
attribute="class"
35-
enableSystem
36-
disableTransitionOnChange
37-
defaultTheme="system"
38-
>
28+
<ThemeProvider attribute="class" enableSystem disableTransitionOnChange defaultTheme="system">
3929
<AppContext.Provider
4030
value={{
4131
font,

0 commit comments

Comments
 (0)