Revisions
- joshlk revised this gist on Apr 8, 2016. 1 changed file with 1 addition and 1 deletion; the docstring of `toPandas` was reworded:

      @@ -6,7 +6,7 @@ def _map_to_pandas(rdds):
       def toPandas(df, n_partitions=None):
           """
      -    Returns the contents of this `DataFrame` as Pandas `pandas.DataFrame` in a speedy fashion. The DataFrame is
      +    Returns the contents of `df` as a local `pandas.DataFrame` in a speedy fashion. The DataFrame is
           repartitioned if `n_partitions` is passed.
           :param df: pyspark.sql.DataFrame
           :param n_partitions: int or None
- joshlk revised this gist on Apr 8, 2016. 1 changed file with 1 addition and 5 deletions; after this revision the body of `toPandas` reads:

      @@ -12,12 +12,8 @@ def toPandas(df, n_partitions=None):
           :param n_partitions: int or None
           :return: pandas.DataFrame
           """
           if n_partitions is not None: df = df.repartition(n_partitions)
           df_pand = df.rdd.mapPartitions(_map_to_pandas).collect()
           df_pand = pd.concat(df_pand)
           df_pand.columns = df.columns
           return df_pand
- joshlk created this gist on Mar 22, 2016, adding one file (@@ -0,0 +1,23 @@):

      import pandas as pd

      def _map_to_pandas(rdds):
          """ Needs to be here due to pickling issues """
          return [pd.DataFrame(list(rdds))]

      def toPandas(df, n_partitions=None):
          """
          Returns the contents of this `DataFrame` as Pandas `pandas.DataFrame` in a speedy fashion.
          The DataFrame is repartitioned if `n_partitions` is passed.
          :param df: pyspark.sql.DataFrame
          :param n_partitions: int or None
          :return: pandas.DataFrame
          """
          if n_partitions is not None: df = df.repartition(n_partitions)
          df_pand = df.rdd.mapPartitions(_map_to_pandas).collect()
          df_pand = pd.concat(df_pand)
          df_pand.columns = df.columns
          return df_pand
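
How the helper works: `_map_to_pandas` converts each RDD partition into a small pandas DataFrame on the executors, and `toPandas` collects those frames and concatenates them locally, which is typically faster than converting collected rows one at a time. Below is a minimal usage sketch under stated assumptions: the SparkSession named `spark`, the example DataFrame built with `spark.range`, and the choice of 16 partitions are illustrative and not part of the gist.

      # Minimal usage sketch (assumes a running SparkSession named `spark`
      # and that the toPandas()/_map_to_pandas() helpers above are defined).
      from pyspark.sql import SparkSession

      spark = SparkSession.builder.appName("fast-toPandas-demo").getOrCreate()

      # Hypothetical example DataFrame; any pyspark.sql.DataFrame works here.
      sdf = spark.range(0, 1000000).withColumnRenamed("id", "value")

      # Convert to a local pandas.DataFrame, repartitioning to 16 partitions
      # so the per-partition conversion is spread across more tasks.
      pdf = toPandas(sdf, n_partitions=16)

      print(pdf.shape)   # (1000000, 1)
      print(pdf.head())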