""" parquet compat """
from __future__ import annotations

import io
import json
import os
from typing import (
    TYPE_CHECKING,
    Any,
    Literal,
)
import warnings
from warnings import catch_warnings

from pandas._config import using_pyarrow_string_dtype
from pandas._config.config import _get_option

from pandas._libs import lib
from pandas.compat._optional import import_optional_dependency
from pandas.errors import AbstractMethodError
from pandas.util._decorators import doc
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import check_dtype_backend

import pandas as pd
from pandas import (
    DataFrame,
    get_option,
)
from pandas.core.shared_docs import _shared_docs
from pandas.io._util import arrow_string_types_mapper

from pandas.io.common import (
    IOHandles,
    get_handle,
    is_fsspec_url,
    is_url,
    stringify_path,
)

if TYPE_CHECKING:
    from pandas._typing import (
        DtypeBackend,
        FilePath,
        ReadBuffer,
        StorageOptions,
        WriteBuffer,
    )


def get_engine(engine: str) -> BaseImpl:
    """return our implementation"""
    if engine == "auto":
        engine = get_option("io.parquet.engine")

    if engine == "auto":
        # try engines in this order
        engine_classes = [PyArrowImpl, FastParquetImpl]

        error_msgs = ""
        for engine_class in engine_classes:
            try:
                return engine_class()
            except ImportError as err:
                error_msgs += "\n - " + str(err)

        raise ImportError(
            "Unable to find a usable engine; "
            "tried using: 'pyarrow', 'fastparquet'.\n"
            "A suitable version of "
            "pyarrow or fastparquet is required for parquet "
            "support.\n"
            "Trying to import the above resulted in these errors:"
            f"{error_msgs}"
        )

    if engine == "pyarrow":
        return PyArrowImpl()
    elif engine == "fastparquet":
        return FastParquetImpl()

    raise ValueError("engine must be one of 'pyarrow', 'fastparquet'")
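

# Minimal usage sketch (illustrative, not part of pandas itself): "auto" first
# consults the ``io.parquet.engine`` option, then tries pyarrow before
# fastparquet, so the engine resolves to whichever backend is importable.
#
#     >>> impl = get_engine("auto")     # PyArrowImpl if pyarrow is installed
#     >>> impl = get_engine("pyarrow")  # raises ImportError without pyarrow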


def _get_path_or_handle(
    path: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes],
    fs: Any,
    storage_options: StorageOptions | None = None,
    mode: str = "rb",
    is_dir: bool = False,
) -> tuple[
    FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], IOHandles[bytes] | None, Any
]:
    """File handling for PyArrow."""
    path_or_handle = stringify_path(path)
    if fs is not None:
        pa_fs = import_optional_dependency("pyarrow.fs", errors="ignore")
        fsspec = import_optional_dependency("fsspec", errors="ignore")
        if pa_fs is not None and isinstance(fs, pa_fs.FileSystem):
            if storage_options:
                raise NotImplementedError(
                    "storage_options not supported with a pyarrow FileSystem."
                )
        elif fsspec is not None and isinstance(fs, fsspec.spec.AbstractFileSystem):
            pass
        else:
            raise ValueError(
                f"filesystem must be a pyarrow or fsspec FileSystem, "
                f"not a {type(fs).__name__}"
            )
    if is_fsspec_url(path_or_handle) and fs is None:
        if storage_options is None:
            pa = import_optional_dependency("pyarrow")
            pa_fs = import_optional_dependency("pyarrow.fs")

            try:
                fs, path_or_handle = pa_fs.FileSystem.from_uri(path)
            except (TypeError, pa.lib.ArrowInvalid):
                pass
        if fs is None:
            fsspec = import_optional_dependency("fsspec")
            fs, path_or_handle = fsspec.core.url_to_fs(
                path_or_handle, **(storage_options or {})
            )
    elif storage_options and (not is_url(path_or_handle) or mode != "rb"):
        # can't write to a remote url without making use of fsspec at the moment
        raise ValueError("storage_options passed with buffer, or non-supported URL")

    handles = None
    if (
        not fs
        and not is_dir
        and isinstance(path_or_handle, str)
        and not os.path.isdir(path_or_handle)
    ):
        # use get_handle only when we are very certain that it is not a directory;
        # fsspec resources can also point to directories. this branch is used for
        # example when reading from non-fsspec URLs
        handles = get_handle(
            path_or_handle, mode, is_text=False, storage_options=storage_options
        )
        fs = None
        path_or_handle = handles.handle
    return path_or_handle, handles, fs


class BaseImpl:
    @staticmethod
    def validate_dataframe(df: DataFrame) -> None:
        if not isinstance(df, DataFrame):
            raise ValueError("to_parquet only supports IO with DataFrames")

    def write(self, df: DataFrame, path, compression, **kwargs):
        raise AbstractMethodError(self)

    def read(self, path, columns=None, **kwargs) -> DataFrame:
        raise AbstractMethodError(self)


class PyArrowImpl(BaseImpl):
    def __init__(self) -> None:
        import_optional_dependency(
            "pyarrow", extra="pyarrow is required for parquet support."
        )
        import pyarrow.parquet

        # import utils to register the pyarrow extension types
        import pandas.core.arrays.arrow.extension_types  # noqa: F401

        self.api = pyarrow

    def write(
        self,
        df: DataFrame,
        path: FilePath | WriteBuffer[bytes],
        compression: str | None = "snappy",
        index: bool | None = None,
        storage_options: StorageOptions | None = None,
        partition_cols: list[str] | None = None,
        filesystem=None,
        **kwargs,
    ) -> None:
        self.validate_dataframe(df)

        from_pandas_kwargs: dict[str, Any] = {"schema": kwargs.pop("schema", None)}
        if index is not None:
            from_pandas_kwargs["preserve_index"] = index

        table = self.api.Table.from_pandas(df, **from_pandas_kwargs)

        if df.attrs:
            df_metadata = {"PANDAS_ATTRS": json.dumps(df.attrs)}
            existing_metadata = table.schema.metadata
            merged_metadata = {**existing_metadata, **df_metadata}
            table = table.replace_schema_metadata(merged_metadata)

        path_or_handle, handles, filesystem = _get_path_or_handle(
            path,
            filesystem,
            storage_options=storage_options,
            mode="wb",
            is_dir=partition_cols is not None,
        )
        if (
            isinstance(path_or_handle, io.BufferedWriter)
            and hasattr(path_or_handle, "name")
            and isinstance(path_or_handle.name, (str, bytes))
        ):
            if isinstance(path_or_handle.name, bytes):
                path_or_handle = path_or_handle.name.decode()
            else:
                path_or_handle = path_or_handle.name

        try:
            if partition_cols is not None:
                # writes to multiple files under the given path
                self.api.parquet.write_to_dataset(
                    table,
                    path_or_handle,
                    compression=compression,
                    partition_cols=partition_cols,
                    filesystem=filesystem,
                    **kwargs,
                )
            else:
                # write to single output file
                self.api.parquet.write_table(
                    table,
                    path_or_handle,
                    compression=compression,
                    filesystem=filesystem,
                    **kwargs,
                )
        finally:
            if handles is not None:
                handles.close()

    def read(
        self,
        path,
        columns=None,
        filters=None,
        use_nullable_dtypes: bool = False,
        dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
        storage_options: StorageOptions | None = None,
        filesystem=None,
        **kwargs,
    ) -> DataFrame:
        kwargs["use_pandas_metadata"] = True

        to_pandas_kwargs = {}
        if dtype_backend == "numpy_nullable":
            from pandas.io._util import _arrow_dtype_mapping

            mapping = _arrow_dtype_mapping()
            to_pandas_kwargs["types_mapper"] = mapping.get
        elif dtype_backend == "pyarrow":
            to_pandas_kwargs["types_mapper"] = pd.ArrowDtype
        elif using_pyarrow_string_dtype():
            to_pandas_kwargs["types_mapper"] = arrow_string_types_mapper()

        manager = _get_option("mode.data_manager", silent=True)
        if manager == "array":
            to_pandas_kwargs["split_blocks"] = True

        path_or_handle, handles, filesystem = _get_path_or_handle(
            path,
            filesystem,
            storage_options=storage_options,
            mode="rb",
        )
        try:
            pa_table = self.api.parquet.read_table(
                path_or_handle,
                columns=columns,
                filesystem=filesystem,
                filters=filters,
                **kwargs,
            )
            result = pa_table.to_pandas(**to_pandas_kwargs)

            if manager == "array":
                result = result._as_manager("array", copy=False)

            if pa_table.schema.metadata:
                if b"PANDAS_ATTRS" in pa_table.schema.metadata:
                    df_metadata = pa_table.schema.metadata[b"PANDAS_ATTRS"]
                    result.attrs = json.loads(df_metadata)
            return result
        finally:
            if handles is not None:
                handles.close()
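

# For orientation, a rough sketch (assuming pyarrow is installed) of the round
# trip PyArrowImpl drives through the pyarrow API for a DataFrame ``df``:
#
#     >>> import pyarrow
#     >>> import pyarrow.parquet
#     >>> table = pyarrow.Table.from_pandas(df)
#     >>> pyarrow.parquet.write_table(table, "out.parquet", compression="snappy")
#     >>> pyarrow.parquet.read_table("out.parquet").to_pandas()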
ddZdd	ddddZdS )r'   rH   rQ   c                 C  s   t ddd}|| _d S )Nr%   z,fastparquet is required for parquet support.rW   )r
   rY   )rM   r%   r+   r+   r,   r[   )  s    zFastParquetImpl.__init__r\   Nr   z*Literal['snappy', 'gzip', 'brotli'] | Noner/   )rI   rN   r3   r!   c           	        s   |  | d|v r"|d ur"tdd|v r4|d}|d urDd|d< |d urTtdt|}t|rtd  fdd|d	< nrtd
tdd. | jj	||f|||d| W d    n1 s0    Y  d S )Npartition_onzYCannot use both partition_on and partition_cols. Use partition_cols for partitioning dataZhiveZfile_scheme9filesystem is not implemented for the fastparquet engine.r8   c                   s    j | dfi pi   S )Nrc   )open)r1   _r8   r3   r+   r,   <lambda>T  s   z'FastParquetImpl.write.<locals>.<lambda>Z	open_withz?storage_options passed with file object or non-fsspec file pathT)record)rN   Zwrite_indexr   )
rK   r)   rf   r;   r   r   r
   r   rY   rP   )	rM   rI   r1   rN   r`   ra   r3   re   rO   r+   r   r,   rP   1  s@    

zFastParquetImpl.write)r3   r!   c                 K  s  i }| dd}| dtj}	d|d< |r2td|	tjurDtd|d urTtdt|}d }
t|rtd}|j|d	fi |pi j	|d
< n,t
|trtj|st|d	d|d}
|
j}z>| jj|fi |}|jf ||d|W |
d ur|
  S n|
d ur|
  0 d S )Nrv   Frw   Zpandas_nullszNThe 'use_nullable_dtypes' argument is not supported for the fastparquet enginezHThe 'dtype_backend' argument is not supported for the fastparquet enginer   r8   r.   r2   r9   )rR   rz   )rf   r	   r   r)   r;   r   r   r
   r   r2   r:   r   rA   r1   rB   r   rC   rY   ZParquetFiler   rr   )rM   r1   rR   rz   r3   re   rO   Zparquet_kwargsrv   rw   rF   r8   Zparquet_filer+   r+   r,   rS   f  sH    	
  
zFastParquetImpl.read)r\   NNNN)NNNN)r>   rT   rU   r[   rP   rS   r+   r+   r+   r,   r'   (  s        8    r'   r3   )r3   r"   r\   r   z$FilePath | WriteBuffer[bytes] | Noner]   r^   r_   zbytes | None)	rI   r1   r    rN   r`   r3   ra   re   r!   c                 K  st   t |tr|g}t|}	|du r(t n|}
|	j| |
f|||||d| |du rlt |
tjsdJ |
 S dS dS )a	  
    Write a DataFrame to the parquet format.

    Parameters
    ----------
    df : DataFrame
    path : str, path object, file-like object, or None, default None
        String, path object (implementing ``os.PathLike[str]``), or file-like
        object implementing a binary ``write()`` function. If None, the result is
        returned as bytes. If a string, it will be used as Root Directory path
        when writing a partitioned dataset. The engine fastparquet does not
        accept file-like objects.
    engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
        Parquet library to use. If 'auto', then the option
        ``io.parquet.engine`` is used. The default ``io.parquet.engine``
        behavior is to try 'pyarrow', falling back to 'fastparquet' if
        'pyarrow' is unavailable.

        When using the ``'pyarrow'`` engine and no storage options are provided
        and a filesystem is implemented by both ``pyarrow.fs`` and ``fsspec``
        (e.g. "s3://"), then the ``pyarrow.fs`` filesystem is attempted first.
        Use the filesystem keyword with an instantiated fsspec filesystem
        if you wish to use its implementation.
    compression : {{'snappy', 'gzip', 'brotli', 'lz4', 'zstd', None}}, default 'snappy'
        Name of the compression to use. Use ``None`` for no compression.
    index : bool, default None
        If ``True``, include the dataframe's index(es) in the file output. If
        ``False``, they will not be written to the file.
        If ``None``, similar to ``True``, the dataframe's index(es)
        will be saved. However, instead of being saved as values,
        the RangeIndex will be stored as a range in the metadata so it
        doesn't require much space and is faster. Other indexes will
        be included as columns in the file output.
    partition_cols : str or list, optional, default None
        Column names by which to partition the dataset.
        Columns are partitioned in the order they are given.
        Must be None if path is not a string.
    {storage_options}

    filesystem : fsspec or pyarrow filesystem, default None
        Filesystem object to use when reading the parquet file. Only implemented
        for ``engine="pyarrow"``.

        .. versionadded:: 2.1.0

    kwargs
        Additional keyword arguments passed to the engine

    Returns
    -------
    bytes if no path argument is provided else None
    N)rN   r`   ra   r3   re   )r:   r   r-   rl   BytesIOrP   getvalue)rI   r1   r    rN   r`   r3   ra   re   rO   implZpath_or_bufr+   r+   r,   


@doc(storage_options=_shared_docs["storage_options"])
def read_parquet(
    path: FilePath | ReadBuffer[bytes],
    engine: str = "auto",
    columns: list[str] | None = None,
    storage_options: StorageOptions | None = None,
    use_nullable_dtypes: bool | lib.NoDefault = lib.no_default,
    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
    filesystem: Any = None,
    filters: list[tuple] | list[list[tuple]] | None = None,
    **kwargs,
) -> DataFrame:
    """
    Load a parquet object from the file path, returning a DataFrame.

    Parameters
    ----------
    path : str, path object or file-like object
        String, path object (implementing ``os.PathLike[str]``), or file-like
        object implementing a binary ``read()`` function.
        The string could be a URL. Valid URL schemes include http, ftp, s3,
        gs, and file. For file URLs, a host is expected. A local file could be:
        ``file://localhost/path/to/table.parquet``.
        A file URL can also be a path to a directory that contains multiple
        partitioned parquet files. Both pyarrow and fastparquet support
        paths to directories as well as file URLs. A directory path could be:
        ``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``.
    engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
        Parquet library to use. If 'auto', then the option
        ``io.parquet.engine`` is used. The default ``io.parquet.engine``
        behavior is to try 'pyarrow', falling back to 'fastparquet' if
        'pyarrow' is unavailable.

        When using the ``'pyarrow'`` engine and no storage options are provided
        and a filesystem is implemented by both ``pyarrow.fs`` and ``fsspec``
        (e.g. "s3://"), then the ``pyarrow.fs`` filesystem is attempted first.
        Use the filesystem keyword with an instantiated fsspec filesystem
        if you wish to use its implementation.
    columns : list, default None
        If not None, only these columns will be read from the file.
    {storage_options}

        .. versionadded:: 1.3.0

    use_nullable_dtypes : bool, default False
        If True, use dtypes that use ``pd.NA`` as missing value indicator
        for the resulting DataFrame (only applicable for the ``pyarrow``
        engine).
        As new dtypes are added that support ``pd.NA`` in the future, the
        output with this option will change to use those dtypes.
        Note: this is an experimental option, and behaviour (e.g. additional
        support dtypes) may change without notice.

        .. deprecated:: 2.0

    dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'
        Back-end data type applied to the resultant :class:`DataFrame`
        (still experimental). Behaviour is as follows:

        * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
          (default).
        * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
          DataFrame.

        .. versionadded:: 2.0

    filesystem : fsspec or pyarrow filesystem, default None
        Filesystem object to use when reading the parquet file. Only implemented
        for ``engine="pyarrow"``.

        .. versionadded:: 2.1.0

    filters : List[Tuple] or List[List[Tuple]], default None
        To filter out data.
        Filter syntax: [[(column, op, val), ...],...]
        where op is [==, =, >, >=, <, <=, !=, in, not in]
        The innermost tuples are transposed into a set of filters applied
        through an `AND` operation.
        The outer list combines these sets of filters through an `OR`
        operation.
        A single list of tuples can also be used, meaning that no `OR`
        operation between set of filters is to be conducted.

        Using this argument will NOT result in row-wise filtering of the final
        partitions unless ``engine="pyarrow"`` is also specified.  For
        other engines, filtering is only performed at the partition level, that is,
        to prevent the loading of some row-groups and/or files.

        .. versionadded:: 2.1.0

    **kwargs
        Any additional kwargs are passed to the engine.

    Returns
    -------
    DataFrame

    See Also
    --------
    DataFrame.to_parquet : Create a parquet object that serializes a DataFrame.

    Examples
    --------
    >>> original_df = pd.DataFrame(
    ...     {{"foo": range(5), "bar": range(5, 10)}}
    ...    )
    >>> original_df
       foo  bar
    0    0    5
    1    1    6
    2    2    7
    3    3    8
    4    4    9
    >>> df_parquet_bytes = original_df.to_parquet()
    >>> from io import BytesIO
    >>> restored_df = pd.read_parquet(BytesIO(df_parquet_bytes))
    >>> restored_df
       foo  bar
    0    0    5
    1    1    6
    2    2    7
    3    3    8
    4    4    9
    >>> restored_df.equals(original_df)
    True
    >>> restored_bar = pd.read_parquet(BytesIO(df_parquet_bytes), columns=["bar"])
    >>> restored_bar
        bar
    0    5
    1    6
    2    7
    3    8
    4    9
    >>> restored_bar.equals(original_df[['bar']])
    True

    The function uses `kwargs` that are passed directly to the engine.
    In the following example, we use the `filters` argument of the pyarrow
    engine to filter the rows of the DataFrame.

    Since `pyarrow` is the default engine, we can omit the `engine` argument.
    Note that the `filters` argument is implemented by the `pyarrow` engine,
    which can benefit from multithreading and also potentially be more
    economical in terms of memory.

    >>> sel = [("foo", ">", 2)]
    >>> restored_part = pd.read_parquet(BytesIO(df_parquet_bytes), filters=sel)
    >>> restored_part
        foo  bar
    0    3    8
    1    4    9
    """
    impl = get_engine(engine)

    if use_nullable_dtypes is not lib.no_default:
        msg = (
            "The argument 'use_nullable_dtypes' is deprecated and will be removed "
            "in a future version."
        )
        if use_nullable_dtypes is True:
            msg += (
                "Use dtype_backend='numpy_nullable' instead of use_nullable_dtype=True."
            )
        warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
    else:
        use_nullable_dtypes = False
    check_dtype_backend(dtype_backend)

    return impl.read(
        path,
        columns=columns,
        filters=filters,
        storage_options=storage_options,
        use_nullable_dtypes=use_nullable_dtypes,
        dtype_backend=dtype_backend,
        filesystem=filesystem,
        **kwargs,
    )