CI: remove failing line #24092

Merged 9 commits on Dec 4, 2018. Showing changes from 8 commits.
7 changes: 2 additions & 5 deletions asv_bench/benchmarks/algorithms.py
@@ -1,10 +1,11 @@
import warnings
from importlib import import_module

import numpy as np

import pandas as pd
from pandas.util import testing as tm


for imp in ['pandas.util', 'pandas.tools.hashing']:
    try:
        hashing = import_module(imp)
@@ -73,10 +74,6 @@ def setup(self):
        self.uniques = tm.makeStringIndex(1000).values
        self.all = self.uniques.repeat(10)

    def time_match_string(self):
        with warnings.catch_warnings(record=True):
            pd.match(self.all, self.uniques)


class Hashing(object):

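The dropped time_match_string benchmark wrapped the long-deprecated pd.match in warnings.catch_warnings(record=True) to keep the deprecation warning out of the benchmark run. A minimal sketch of that suppression pattern in isolation (the warning below is synthetic, only to show the mechanics):

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # Stand-in for a deprecated call such as pd.match(...).
    warnings.warn("pd.match is deprecated", FutureWarning)

# The warning was captured instead of being printed.
assert issubclass(caught[-1].category, FutureWarning)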
9 changes: 3 additions & 6 deletions asv_bench/benchmarks/frame_methods.py
@@ -1,10 +1,10 @@
import string
import warnings

import numpy as np

from pandas import (
    DataFrame, MultiIndex, NaT, Series, date_range, isnull, period_range)
import pandas.util.testing as tm
from pandas import (DataFrame, Series, MultiIndex, date_range, period_range,
                    isnull, NaT)


class GetNumericData(object):
@@ -61,9 +61,6 @@ def time_reindex_axis1(self):
    def time_reindex_both_axes(self):
        self.df.reindex(index=self.idx, columns=self.idx)

    def time_reindex_both_axes_ix(self):
        self.df.ix[self.idx, self.idx]

    def time_reindex_upcast(self):
        self.df2.reindex(np.random.permutation(range(1200)))

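The removed time_reindex_both_axes_ix benchmark exercised the deprecated .ix indexer, which no longer has a place in the benchmark suite. For reference, a minimal sketch of the label-based equivalent using .loc (the frame below is made up, not the benchmark's actual setup):

import numpy as np
import pandas as pd

idx = pd.Index(["a", "b", "c"])
df = pd.DataFrame(np.arange(9).reshape(3, 3), index=idx, columns=idx)

# .ix mixed label- and position-based access; for pure label selection
# the equivalent of df.ix[idx, idx] is:
result = df.loc[idx, idx]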
14 changes: 8 additions & 6 deletions asv_bench/benchmarks/groupby.py
@@ -1,11 +1,13 @@
import warnings
from string import ascii_letters
from itertools import product
from functools import partial
from itertools import product
from string import ascii_letters
import warnings

import numpy as np
from pandas import (DataFrame, Series, MultiIndex, date_range, period_range,
                    TimeGrouper, Categorical, Timestamp)

from pandas import (
    Categorical, DataFrame, MultiIndex, Series, TimeGrouper, Timestamp,
    date_range, period_range)
import pandas.util.testing as tm


@@ -210,7 +212,7 @@ def time_multi_int_nunique(self, df):

class AggFunctions(object):

    def setup_cache():
    def setup_cache(self):
        N = 10**5
        fac1 = np.array(['A', 'B', 'C'], dtype='O')
        fac2 = np.array(['one', 'two'], dtype='O')
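The one-line fix above adds the missing self parameter: setup_cache is defined as a method of the benchmark class, and in plain Python a zero-argument method fails as soon as it is called through an instance. A minimal sketch of the failure mode (the class name is invented):

class Bench(object):
    def setup_cache():  # note: no self
        return {}


Bench().setup_cache()
# TypeError: setup_cache() takes 0 positional arguments but 1 was given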
10 changes: 1 addition & 9 deletions pandas/core/sparse/scipy_sparse.py
@@ -58,15 +58,7 @@ def _get_label_to_i_dict(labels, sort_labels=False):
    return (d)

def _get_index_subset_to_coord_dict(index, subset, sort_labels=False):
    def robust_get_level_values(i):
        # if index has labels (that are not None) use those,
        # else use the level location
        try:
            return index.get_level_values(index.names[i])
        except KeyError:
            return index.get_level_values(i)

    ilabels = list(zip(*[robust_get_level_values(i) for i in subset]))
    ilabels = list(zip(*[index._get_level_values(i) for i in subset]))
TomAugspurger (PR author) commented:
I think this is OK to do... _sparse_series_to_coo is the only caller, and it uses _get_level_number(x) to go from maybe labels to numbers.

    labels_to_i = _get_label_to_i_dict(ilabels,
                                       sort_labels=sort_labels)
    labels_to_i = Series(labels_to_i)
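For context, a short sketch of the behaviour the simplified line relies on: positional level access on a MultiIndex works even when all level names are None, which is exactly the case the new regression test below constructs. The public get_level_values is shown here for illustration; the diff calls the private _get_level_values, which takes a level number directly:

import pandas as pd

# Level names are (None, None), as in the regression test added below.
idx = pd.MultiIndex.from_product([['A', 'B'], [0, 1]])

# Name-based lookup is ambiguous when several names are None, but
# access by level position is always well defined:
idx.get_level_values(0)  # Index(['A', 'A', 'B', 'B'], dtype='object')
idx.get_level_values(1)  # Int64Index([0, 1, 0, 1], dtype='int64')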
11 changes: 1 addition & 10 deletions pandas/io/formats/html.py
@@ -5,7 +5,6 @@

from __future__ import print_function

from distutils.version import LooseVersion
from textwrap import dedent

from pandas.compat import OrderedDict, lzip, map, range, u, unichr, zip
@@ -161,15 +160,7 @@ def write_result(self, buf):
            _classes.extend(self.classes)

        if self.notebook:
            div_style = ''
            try:
                import IPython
TomAugspurger (PR author) commented on Dec 4, 2018:
The import IPython was causing a failure. I figure we're OK to drop IPython 2.x compat code :)

                if IPython.__version__ < LooseVersion('3.0.0'):
                    div_style = ' style="max-width:1500px;overflow:auto;"'
            except (ImportError, AttributeError):
                pass

            self.write('<div{style}>'.format(style=div_style))
            self.write('<div>')

            self.write_style()

5 changes: 5 additions & 0 deletions pandas/tests/io/conftest.py
@@ -37,6 +37,11 @@ def s3_resource(tips_file, jsonl_file):
"""
pytest.importorskip('s3fs')
boto3 = pytest.importorskip('boto3')
# GH-24092. See if boto.plugin skips the test or fails.
try:
pytest.importorskip("boto.plugin")
except AttributeError:
raise pytest.skip("moto/moto error")
A reviewer (Contributor) commented:
wow, that is indeed nasty, haha.

    moto = pytest.importorskip('moto')

    test_s3_files = [
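A note on the guard added above: pytest.importorskip only turns an ImportError into a skip; any other exception raised while importing (here an AttributeError coming out of boto/moto) would fail the fixture, so it is caught and converted into an explicit skip. A minimal standalone sketch of the same pattern (the test name is made up):

import pytest


def test_needs_boto_plugin():
    # importorskip skips on ImportError only; other exceptions propagate.
    try:
        pytest.importorskip("boto.plugin")
    except AttributeError:
        pytest.skip("boto/moto raised AttributeError on import")
    # ... the real test body would go here ...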
14 changes: 14 additions & 0 deletions pandas/tests/sparse/frame/test_to_from_scipy.py
@@ -1,5 +1,6 @@
import pytest
import numpy as np
import pandas as pd
from pandas.util import testing as tm
from pandas import SparseDataFrame, SparseSeries
from pandas.core.sparse.api import SparseDtype
@@ -168,3 +169,16 @@ def test_from_scipy_fillna(spmatrix):
        expected[col].fill_value = -1

    tm.assert_sp_frame_equal(sdf, expected)


def test_index_names_multiple_nones():
    # https://github.com/pandas-dev/pandas/pull/24092
    sparse = pytest.importorskip("scipy.sparse")

    s = (pd.Series(1, index=pd.MultiIndex.from_product([['A', 'B'], [0, 1]]))
         .to_sparse())
    result, _, _ = s.to_coo()
    assert isinstance(result, sparse.coo_matrix)
    result = result.toarray()
    expected = np.ones((2, 2), dtype="int64")
    tm.assert_numpy_array_equal(result, expected)
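For context on what the new test exercises: with the then-current SparseSeries API, to_coo maps the MultiIndex levels onto the rows and columns of a scipy.sparse.coo_matrix and also returns the row and column labels. A hedged sketch (the values in the comments are what the test's 2x2 index would produce):

import pandas as pd

s = (pd.Series(1, index=pd.MultiIndex.from_product([['A', 'B'], [0, 1]]))
     .to_sparse())

# Level 0 becomes the rows, level 1 the columns by default.
mat, rows, cols = s.to_coo(row_levels=[0], column_levels=[1])
# rows -> ['A', 'B'], cols -> [0, 1], mat.toarray() -> a 2x2 array of ones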
39 changes: 37 additions & 2 deletions setup.cfg
@@ -361,5 +361,40 @@ skip=
pandas/tests/computation/test_compat.py,
pandas/tests/computation/test_eval.py,
pandas/types/common.py,
pandas/tests/extension/arrow/test_bool.py
doc/source/conf.py
pandas/tests/extension/arrow/test_bool.py,
doc/source/conf.py,
asv_bench/benchmarks/algorithms.py,
asv_bench/benchmarks/attrs_caching.py,
asv_bench/benchmarks/binary_ops.py,
asv_bench/benchmarks/categoricals.py,
asv_bench/benchmarks/ctors.py,
asv_bench/benchmarks/eval.py,
asv_bench/benchmarks/frame_ctor.py,
asv_bench/benchmarks/frame_methods.py,
asv_bench/benchmarks/gil.py,
asv_bench/benchmarks/groupby.py,
asv_bench/benchmarks/index_object.py,
asv_bench/benchmarks/indexing.py,
asv_bench/benchmarks/inference.py,
asv_bench/benchmarks/io/csv.py,
asv_bench/benchmarks/io/excel.py,
asv_bench/benchmarks/io/hdf.py,
asv_bench/benchmarks/io/json.py,
asv_bench/benchmarks/io/msgpack.py,
asv_bench/benchmarks/io/pickle.py,
asv_bench/benchmarks/io/sql.py,
asv_bench/benchmarks/io/stata.py,
asv_bench/benchmarks/join_merge.py,
asv_bench/benchmarks/multiindex_object.py,
asv_bench/benchmarks/panel_ctor.py,
asv_bench/benchmarks/panel_methods.py,
asv_bench/benchmarks/plotting.py,
asv_bench/benchmarks/reindex.py,
asv_bench/benchmarks/replace.py,
asv_bench/benchmarks/reshape.py,
asv_bench/benchmarks/rolling.py,
asv_bench/benchmarks/series_methods.py,
asv_bench/benchmarks/sparse.py,
asv_bench/benchmarks/stat_ops.py,
asv_bench/benchmarks/timeseries.py