From 4775cac0181101c1890d56438436efc1430d1a43 Mon Sep 17 00:00:00 2001
From: Ming Li
Date: Sun, 27 May 2018 14:23:26 +0100
Subject: [PATCH] refactor tests

---
 pandas/tests/frame/test_to_csv.py        | 16 ----------------
 pandas/tests/io/json/test_compression.py | 18 ------------------
 pandas/tests/io/test_pickle.py           | 15 ---------------
 pandas/tests/series/test_io.py           | 14 --------------
 pandas/tests/test_common.py              | 23 ++++++++++++++++++++++-
 5 files changed, 22 insertions(+), 64 deletions(-)

diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py
index cfcb9d1257a86..e4829ebf48561 100644
--- a/pandas/tests/frame/test_to_csv.py
+++ b/pandas/tests/frame/test_to_csv.py
@@ -943,22 +943,6 @@ def test_to_csv_compression(self, compression):
             with tm.decompress_file(filename, compression) as fh:
                 assert_frame_equal(df, read_csv(fh, index_col=0))
 
-    def test_to_csv_compression_size(self, compression):
-
-        df = pd.concat(100 * [DataFrame([[0.123456, 0.234567, 0.567567],
-                                         [12.32112, 123123.2, 321321.2]],
-                                        columns=['X', 'Y', 'Z'])])
-
-        with ensure_clean() as filename:
-            import os
-            df.to_csv(filename, compression=compression)
-            file_size = os.path.getsize(filename)
-
-            if compression:
-                df.to_csv(filename, compression=None)
-                uncompressed_file_size = os.path.getsize(filename)
-                assert uncompressed_file_size > file_size
-
     def test_to_csv_date_format(self):
         with ensure_clean('__tmp_to_csv_date_format__') as path:
             dt_index = self.tsframe.index
diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py
index c425425c21ecc..c9074ca49e5be 100644
--- a/pandas/tests/io/json/test_compression.py
+++ b/pandas/tests/io/json/test_compression.py
@@ -21,24 +21,6 @@ def test_compression_roundtrip(compression):
         assert_frame_equal(df, pd.read_json(result))
 
 
-def test_to_json_compression_size(compression):
-
-    df = pd.concat(100 * [pd.DataFrame([[0.123456, 0.234567, 0.567567],
-                                        [12.32112, 123123.2, 321321.2]],
-                                       columns=['X', 'Y', 'Z'])],
-                   ignore_index=True)
-
-    with tm.ensure_clean() as filename:
-        import os
-        df.to_json(filename, compression=compression)
-        file_size = os.path.getsize(filename)
-
-        if compression:
-            df.to_json(filename, compression=None)
-            uncompressed_file_size = os.path.getsize(filename)
-            assert uncompressed_file_size > file_size
-
-
 def test_read_zipped_json():
     uncompressed_path = tm.get_data_path("tsframe_v012.json")
     uncompressed_df = pd.read_json(uncompressed_path)
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index 05bdb3f5d2e7d..fbe2174e603e2 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -457,21 +457,6 @@ def test_read_infer(self, ext, get_random_path):
 
         tm.assert_frame_equal(df, df2)
 
-    def test_compression_size(self, compression):
-
-        df = pd.concat(100 * [pd.DataFrame([[0.123456, 0.234567, 0.567567],
-                                            [12.32112, 123123.2, 321321.2]],
-                                           columns=['X', 'Y', 'Z'])])
-
-        with tm.ensure_clean() as filename:
-            df.to_pickle(filename, compression=compression)
-            file_size = os.path.getsize(filename)
-
-            if compression:
-                df.to_pickle(filename, compression=None)
-                uncompressed_file_size = os.path.getsize(filename)
-                assert uncompressed_file_size > file_size
-
 
 # ---------------------
 # test pickle compression
diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py
index f3c9803ffb79e..e369dfda6deac 100644
--- a/pandas/tests/series/test_io.py
+++ b/pandas/tests/series/test_io.py
@@ -161,20 +161,6 @@ def test_to_csv_compression(self, compression):
                                                    index_col=0,
                                                    squeeze=True))
 
-    def test_to_csv_compression_size(self, compression):
-
-        s = Series(100 * [0.123456, 0.234567, 0.567567], name='X')
-
-        with ensure_clean() as filename:
-            import os
-            s.to_csv(filename, compression=compression, header=True)
-            file_size = os.path.getsize(filename)
-
-            if compression:
-                s.to_csv(filename, compression=None, header=True)
-                uncompressed_file_size = os.path.getsize(filename)
-                assert uncompressed_file_size > file_size
-
 
 class TestSeriesIO(TestData):
 
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index 0b329f64dafa3..28ddf9e0204c3 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -1,12 +1,14 @@
 # -*- coding: utf-8 -*-
 import pytest
+import os
 
 import collections
 from functools import partial
 
 import numpy as np
 
-from pandas import Series, Timestamp
+import pandas as pd
+from pandas import Series, DataFrame, Timestamp
 from pandas.compat import range, lmap
 import pandas.core.common as com
 from pandas.core import ops
@@ -222,3 +224,22 @@ def test_standardize_mapping():
 
     dd = collections.defaultdict(list)
     assert isinstance(com.standardize_mapping(dd), partial)
+
+
+@pytest.mark.parametrize('method', ['to_pickle', 'to_json', 'to_csv'])
+def test_compression_size(method, compression):
+
+    df = pd.concat(100 * [DataFrame([[0.123456, 0.234567, 0.567567],
+                                     [12.32112, 123123.2, 321321.2]],
+                                    columns=['X', 'Y', 'Z'])],
+                   ignore_index=True)
+    s = df.iloc[:, 0]
+
+    with tm.ensure_clean() as filename:
+        for obj in [df, s]:
+            getattr(obj, method)(filename, compression=compression)
+            file_size = os.path.getsize(filename)
+            getattr(obj, method)(filename, compression=None)
+            uncompressed_file_size = os.path.getsize(filename)
+            if compression:
+                assert uncompressed_file_size > file_size
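
A condensed, standalone sketch of the pattern this patch consolidates, for readers who want to run it outside the pandas test suite. It is an illustration only, not the patch's code: the test name and the parametrize list for `compression` below are stand-ins of my own (the real suite gets `compression` from a shared fixture in its conftest), and `pandas.util.testing.ensure_clean` is assumed to be importable, as it was in pandas of this era.

    import os

    import pytest

    import pandas as pd
    import pandas.util.testing as tm


    # Stand-in for the shared `compression` fixture used in pandas' own suite.
    @pytest.mark.parametrize('compression', [None, 'gzip', 'bz2', 'xz'])
    @pytest.mark.parametrize('method', ['to_pickle', 'to_json', 'to_csv'])
    def test_compressed_file_is_smaller(method, compression):
        # A frame repetitive enough that compression clearly pays off.
        df = pd.concat(100 * [pd.DataFrame([[0.123456, 0.234567, 0.567567],
                                            [12.32112, 123123.2, 321321.2]],
                                           columns=['X', 'Y', 'Z'])],
                       ignore_index=True)

        with tm.ensure_clean() as filename:
            # Write once with the requested compression and once uncompressed,
            # then compare the resulting sizes on disk.
            getattr(df, method)(filename, compression=compression)
            compressed_size = os.path.getsize(filename)

            getattr(df, method)(filename, compression=None)
            uncompressed_size = os.path.getsize(filename)

            if compression:
                assert uncompressed_size > compressed_size

Run with pytest; each (method, compression) pair becomes its own test case, which is the point of folding the four near-identical per-format tests into a single parametrized function.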