Skip to content

Commit

Permalink
refactor tests
Browse files Browse the repository at this point in the history
  • Loading branch information
minggli committed May 27, 2018
2 parents 3a29ab3 + 1c2844a commit f12a4e3
Show file tree
Hide file tree
Showing 6 changed files with 29 additions and 65 deletions.
2 changes: 1 addition & 1 deletion doc/source/whatsnew/v0.24.0.txt
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
.. _whatsnew_0240:

v0.24.0
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-------

.. _whatsnew_0240.enhancements:

Expand Down
16 changes: 0 additions & 16 deletions pandas/tests/frame/test_to_csv.py
Original file line number Diff line number Diff line change
Expand Up @@ -943,22 +943,6 @@ def test_to_csv_compression(self, compression):
with tm.decompress_file(filename, compression) as fh:
assert_frame_equal(df, read_csv(fh, index_col=0))

def test_to_csv_compression_size(self, compression):
    """Writing with compression should produce a smaller CSV than without.

    ``compression`` is the pytest fixture cycling through the supported
    codecs (and a falsy value meaning "no compression").
    """
    # Repeat a small 2x3 float block 100 times so the file is large
    # enough for compression to actually shrink it.
    base = DataFrame([[0.123456, 0.234567, 0.567567],
                      [12.32112, 123123.2, 321321.2]],
                     columns=['X', 'Y', 'Z'])
    df = pd.concat([base] * 100)

    with ensure_clean() as filename:
        import os
        df.to_csv(filename, compression=compression)
        compressed_size = os.path.getsize(filename)

        if not compression:
            # Nothing to compare against when no codec is in play.
            return
        df.to_csv(filename, compression=None)
        assert os.path.getsize(filename) > compressed_size

def test_to_csv_date_format(self):
with ensure_clean('__tmp_to_csv_date_format__') as path:
dt_index = self.tsframe.index
Expand Down
18 changes: 0 additions & 18 deletions pandas/tests/io/json/test_compression.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,24 +21,6 @@ def test_compression_roundtrip(compression):
assert_frame_equal(df, pd.read_json(result))


def test_to_json_compression_size(compression):
    """Writing with compression should produce a smaller JSON file than without.

    ``compression`` is the pytest fixture cycling through the supported
    codecs (and a falsy value meaning "no compression").
    """
    # Repeat a small 2x3 float block 100 times so the file is large
    # enough for compression to actually shrink it.
    base = pd.DataFrame([[0.123456, 0.234567, 0.567567],
                         [12.32112, 123123.2, 321321.2]],
                        columns=['X', 'Y', 'Z'])
    df = pd.concat([base] * 100, ignore_index=True)

    with tm.ensure_clean() as filename:
        import os
        df.to_json(filename, compression=compression)
        compressed_size = os.path.getsize(filename)

        if not compression:
            # Nothing to compare against when no codec is in play.
            return
        df.to_json(filename, compression=None)
        assert os.path.getsize(filename) > compressed_size


def test_read_zipped_json():
uncompressed_path = tm.get_data_path("tsframe_v012.json")
uncompressed_df = pd.read_json(uncompressed_path)
Expand Down
15 changes: 0 additions & 15 deletions pandas/tests/io/test_pickle.py
Original file line number Diff line number Diff line change
Expand Up @@ -457,21 +457,6 @@ def test_read_infer(self, ext, get_random_path):

tm.assert_frame_equal(df, df2)

def test_compression_size(self, compression):
    """Pickling with compression should produce a smaller file than without.

    ``compression`` is the pytest fixture cycling through the supported
    codecs (and a falsy value meaning "no compression").
    """
    # Repeat a small 2x3 float block 100 times so the file is large
    # enough for compression to actually shrink it.
    base = pd.DataFrame([[0.123456, 0.234567, 0.567567],
                         [12.32112, 123123.2, 321321.2]],
                        columns=['X', 'Y', 'Z'])
    df = pd.concat([base] * 100)

    with tm.ensure_clean() as filename:
        df.to_pickle(filename, compression=compression)
        compressed_size = os.path.getsize(filename)

        if not compression:
            # Nothing to compare against when no codec is in play.
            return
        df.to_pickle(filename, compression=None)
        assert os.path.getsize(filename) > compressed_size


# ---------------------
# test pickle compression
Expand Down
14 changes: 0 additions & 14 deletions pandas/tests/series/test_io.py
Original file line number Diff line number Diff line change
Expand Up @@ -161,20 +161,6 @@ def test_to_csv_compression(self, compression):
index_col=0,
squeeze=True))

def test_to_csv_compression_size(self, compression):
    """Writing a Series with compression should yield a smaller CSV than without.

    ``compression`` is the pytest fixture cycling through the supported
    codecs (and a falsy value meaning "no compression").
    """
    # 300 float values: large enough for compression to actually shrink
    # the output file.
    s = Series([0.123456, 0.234567, 0.567567] * 100, name='X')

    with ensure_clean() as filename:
        import os
        s.to_csv(filename, compression=compression, header=True)
        compressed_size = os.path.getsize(filename)

        if not compression:
            # Nothing to compare against when no codec is in play.
            return
        s.to_csv(filename, compression=None, header=True)
        assert os.path.getsize(filename) > compressed_size


class TestSeriesIO(TestData):

Expand Down
29 changes: 28 additions & 1 deletion pandas/tests/test_common.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,14 @@
# -*- coding: utf-8 -*-

import pytest
import os
import collections
from functools import partial

import numpy as np

from pandas import Series, Timestamp
import pandas as pd
from pandas import Series, DataFrame, Timestamp
from pandas.compat import range, lmap
import pandas.core.common as com
from pandas.core import ops
Expand Down Expand Up @@ -222,3 +224,28 @@ def test_standardize_mapping():

dd = collections.defaultdict(list)
assert isinstance(com.standardize_mapping(dd), partial)


@pytest.mark.parametrize('method', ['to_pickle', 'to_json', 'to_csv'])
def test_compression_size(method, compression):
    """Writing with compression should produce a smaller file than without.

    Exercises DataFrame and Series through each writer ``method``
    (``to_pickle``/``to_json``/``to_csv``). ``compression`` is the pytest
    fixture cycling through the supported codecs (and a falsy value
    meaning "no compression"), in which case only the write path is
    exercised and no size comparison is made.
    """
    # Repeat a small 2x3 float block 100 times so the file is large
    # enough for compression to actually shrink it.
    df = pd.concat(100 * [DataFrame([[0.123456, 0.234567, 0.567567],
                                     [12.32112, 123123.2, 321321.2]],
                                    columns=['X', 'Y', 'Z'])],
                   ignore_index=True)
    s = df.iloc[:, 0]

    with tm.ensure_clean() as filename:
        # Same measure-and-compare sequence for both objects; looping
        # avoids the copy-pasted duplicate of the DataFrame branch.
        for obj in [df, s]:
            getattr(obj, method)(filename, compression=compression)
            compressed_size = os.path.getsize(filename)
            getattr(obj, method)(filename, compression=None)
            uncompressed_size = os.path.getsize(filename)
            if compression:
                assert uncompressed_size > compressed_size

0 comments on commit f12a4e3

Please sign in to comment.