
Commit ba77219

Author: David Robertson (committed)
1 parent 2411f2f

Rework docstrings

The data to be encoded doesn't have to be a dictionary.
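As a minimal usage sketch (not part of this commit, example values made up for illustration), this is what the commit message means in practice: the canonical encoder accepts any JSON-serialisable value, not only dictionaries.

    from canonicaljson import encode_canonical_json

    # Any JSON-serialisable value is accepted, not just a dict.
    print(encode_canonical_json(["b", "a", 1]))
    # expected: b'["b","a",1]'

    # Dict keys are sorted by unicode code point and whitespace is minimal.
    print(encode_canonical_json({"b": 1, "a": {"y": 2, "x": 1}}))
    # expected: b'{"a":{"x":1,"y":2},"b":1}'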

File tree

1 file changed: +25 / -36 lines


canonicaljson.py

Lines changed: 25 additions & 36 deletions
@@ -89,55 +89,44 @@ def set_json_library(json_lib: JsonLibrary) -> None:
     )
 
 
-def encode_canonical_json(json_object: object) -> bytes:
-    """Encodes the shortest UTF-8 JSON encoding with dictionary keys
-    lexicographically sorted by unicode code point.
-
-    Args:
-        json_object (dict): The JSON object to encode.
+def encode_canonical_json(data: object) -> bytes:
+    """Encodes the given `data` as a UTF-8 canonical JSON bytestring.
 
-    Returns:
-        bytes encoding the JSON object"""
-    s = _canonical_encoder.encode(json_object)
+    This encoding is the shortest possible. Dictionary keys are
+    lexicographically sorted by unicode code point.
+    """
+    s = _canonical_encoder.encode(data)
     return s.encode("utf-8")
 
 
-def iterencode_canonical_json(json_object: object) -> Generator[bytes, None, None]:
-    """Encodes the shortest UTF-8 JSON encoding with dictionary keys
-    lexicographically sorted by unicode code point.
-
-    Args:
-        json_object (dict): The JSON object to encode.
+def iterencode_canonical_json(data: object) -> Generator[bytes, None, None]:
+    """Iteratively encodes the given `data` as a UTF-8 canonical JSON bytestring.
 
-    Returns:
-        generator which yields bytes encoding the JSON object"""
-    for chunk in _canonical_encoder.iterencode(json_object):
-        yield chunk.encode("utf-8")
+    This yields one or more bytestrings; concatenating them all together yields the
+    full encoding of `data`. Building up the encoding gradually in this way allows us to
+    encode large pieces of `data` without blocking other tasks.
 
-
-def encode_pretty_printed_json(json_object: object) -> bytes:
+    This encoding is the shortest possible. Dictionary keys are
+    lexicographically sorted by unicode code point.
     """
-    Encodes the JSON object dict as human readable UTF-8 bytes.
-
-    Args:
-        json_object (dict): The JSON object to encode.
-
-    Returns:
-        bytes encoding the JSON object"""
+    for chunk in _canonical_encoder.iterencode(data):
+        yield chunk.encode("utf-8")
 
-    return _pretty_encoder.encode(json_object).encode("utf-8")
 
+def encode_pretty_printed_json(data: object) -> bytes:
+    """Encodes the given `data` as a UTF-8 human-readable JSON bytestring."""
 
-def iterencode_pretty_printed_json(json_object: object) -> Generator[bytes, None, None]:
-    """Encodes the JSON object dict as human readable UTF-8 bytes.
+    return _pretty_encoder.encode(data).encode("utf-8")
 
-    Args:
-        json_object (dict): The JSON object to encode.
 
-    Returns:
-        generator which yields bytes encoding the JSON object"""
+def iterencode_pretty_printed_json(data: object) -> Generator[bytes, None, None]:
+    """Iteratively encodes the given `data` as a UTF-8 human-readable JSON bytestring.
 
-    for chunk in _pretty_encoder.iterencode(json_object):
+    This yields one or more bytestrings; concatenating them all together yields the
+    full encoding of `data`. Building up the encoding gradually in this way allows us to
+    encode large pieces of `data` without blocking other tasks.
+    """
+    for chunk in _pretty_encoder.iterencode(data):
         yield chunk.encode("utf-8")

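A rough sketch (not part of this commit; the file path and sample data are hypothetical) of how the iterencode variants described in these docstrings can be consumed, streaming chunks out rather than building one large bytestring:

    from canonicaljson import encode_canonical_json, iterencode_canonical_json

    # Hypothetical large structure standing in for real data.
    data = {"events": [{"id": i, "body": "x" * 100} for i in range(10_000)]}

    # Stream the encoding chunk by chunk instead of holding it all in memory at once.
    with open("out.json", "wb") as f:
        for chunk in iterencode_canonical_json(data):
            f.write(chunk)

    # Concatenating the chunks reproduces the one-shot encoding.
    assert b"".join(iterencode_canonical_json(data)) == encode_canonical_json(data)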