BUG: Fix precision loss in read_json #59284

Open · wants to merge 14 commits into base: main
3 changes: 2 additions & 1 deletion pandas/io/json/_json.py
```diff
@@ -1168,6 +1168,7 @@ def _try_convert_data(
         """
         Try to parse a Series into a column by inferring dtype.
         """
+        org_data = data
         # don't try to coerce, unless a force conversion
         if use_dtypes:
             if not self.dtype:
@@ -1222,7 +1223,7 @@ def _try_convert_data(
         if len(data) and data.dtype in ("float", "object"):
             # coerce ints if we can
             try:
-                new_data = data.astype("int64")
+                new_data = org_data.astype("int64")
                 if (new_data == data).all():
                     data = new_data
                     converted = True
```
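For context on why keeping a reference to the original data fixes the bug: earlier in `_try_convert_data`, object data is first coerced to float64, and float64 can only represent integers exactly up to 2**53. Casting that float back to int64 then yields a rounded value for 16-digit integers, whereas casting the saved `org_data` skips the float intermediate. A minimal sketch of the failure mode, using plain pandas outside of `read_json`:

```python
import pandas as pd

s = pd.Series(["9999999999999999"], dtype=object)

# Old path: object -> float64 -> int64. 9999999999999999 is not exactly
# representable as a float64 (it exceeds 2**53), so it rounds to the
# nearest double before the integer cast.
print(s.astype("float64").astype("int64")[0])  # 10000000000000000

# Fixed path: cast the original object data straight to int64.
print(s.astype("int64")[0])  # 9999999999999999
```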
12 changes: 12 additions & 0 deletions pandas/tests/io/json/test_pandas.py
```diff
@@ -2286,3 +2286,15 @@ def test_read_json_lines_rangeindex():
     result = read_json(StringIO(data), lines=True).index
     expected = RangeIndex(2)
     tm.assert_index_equal(result, expected, exact=True)
+
+
+def test_large_number():
+    assert (
+        read_json(
+            StringIO('["9999999999999999"]'),
+            orient="values",
+            typ="series",
+            convert_dates=False,
+        )[0]
+        == 9999999999999999
+    )
```
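As a quick sanity check (not part of the patch), the same input read through the public API illustrates what the new assertion guards against:

```python
from io import StringIO
from pandas import read_json

result = read_json(
    StringIO('["9999999999999999"]'),
    orient="values",
    typ="series",
    convert_dates=False,
)
# Before this patch: 10000000000000000 (round-tripped through float64).
# With this patch:   9999999999999999 (exact).
print(result[0])
```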