Mirror of https://github.com/aykhans/AzSuicideDataVisualization.git
Synced 2025-07-03 06:31:28 +00:00

Commit: first commit
.venv/Lib/site-packages/pandas/io/excel/__init__.py (new file, 24 lines)
@@ -0,0 +1,24 @@
from pandas.io.excel._base import (
    ExcelFile,
    ExcelWriter,
    read_excel,
)
from pandas.io.excel._odswriter import ODSWriter as _ODSWriter
from pandas.io.excel._openpyxl import OpenpyxlWriter as _OpenpyxlWriter
from pandas.io.excel._util import register_writer
from pandas.io.excel._xlsxwriter import XlsxWriter as _XlsxWriter
from pandas.io.excel._xlwt import XlwtWriter as _XlwtWriter

__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]


register_writer(_OpenpyxlWriter)


register_writer(_XlwtWriter)


register_writer(_XlsxWriter)


register_writer(_ODSWriter)
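The writers registered above are what DataFrame.to_excel and ExcelWriter look up by engine name. A minimal usage sketch (assuming pandas and openpyxl are installed; the file name is illustrative):

import pandas as pd

# to_excel resolves engine="openpyxl" through the registry populated above;
# read_excel picks the default reader for .xlsx (also openpyxl).
df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
df.to_excel("demo.xlsx", index=False, engine="openpyxl")
print(pd.read_excel("demo.xlsx"))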
.venv/Lib/site-packages/pandas/io/excel/_base.py (new file, 1505 lines)
File diff suppressed because it is too large.
.venv/Lib/site-packages/pandas/io/excel/_odfreader.py (new file, 233 lines)
@@ -0,0 +1,233 @@
from __future__ import annotations

import numpy as np

from pandas._typing import (
    FilePath,
    ReadBuffer,
    Scalar,
    StorageOptions,
)
from pandas.compat._optional import import_optional_dependency

import pandas as pd

from pandas.io.excel._base import BaseExcelReader


class ODFReader(BaseExcelReader):
    """
    Read tables out of OpenDocument formatted files.

    Parameters
    ----------
    filepath_or_buffer : str, path to be parsed or
        an open readable stream.
    storage_options : dict, optional
        passed to fsspec for appropriate URLs (see ``_get_filepath_or_buffer``)
    """

    def __init__(
        self,
        filepath_or_buffer: FilePath | ReadBuffer[bytes],
        storage_options: StorageOptions = None,
    ):
        import_optional_dependency("odf")
        super().__init__(filepath_or_buffer, storage_options=storage_options)

    @property
    def _workbook_class(self):
        from odf.opendocument import OpenDocument

        return OpenDocument

    def load_workbook(self, filepath_or_buffer: FilePath | ReadBuffer[bytes]):
        from odf.opendocument import load

        return load(filepath_or_buffer)

    @property
    def empty_value(self) -> str:
        """Property for compat with other readers."""
        return ""

    @property
    def sheet_names(self) -> list[str]:
        """Return a list of sheet names present in the document"""
        from odf.table import Table

        tables = self.book.getElementsByType(Table)
        return [t.getAttribute("name") for t in tables]

    def get_sheet_by_index(self, index: int):
        from odf.table import Table

        self.raise_if_bad_sheet_by_index(index)
        tables = self.book.getElementsByType(Table)
        return tables[index]

    def get_sheet_by_name(self, name: str):
        from odf.table import Table

        self.raise_if_bad_sheet_by_name(name)
        tables = self.book.getElementsByType(Table)

        for table in tables:
            if table.getAttribute("name") == name:
                return table

        self.close()
        raise ValueError(f"sheet {name} not found")

    def get_sheet_data(self, sheet, convert_float: bool) -> list[list[Scalar]]:
        """
        Parse an ODF Table into a list of lists
        """
        from odf.table import (
            CoveredTableCell,
            TableCell,
            TableRow,
        )

        covered_cell_name = CoveredTableCell().qname
        table_cell_name = TableCell().qname
        cell_names = {covered_cell_name, table_cell_name}

        sheet_rows = sheet.getElementsByType(TableRow)
        empty_rows = 0
        max_row_len = 0

        table: list[list[Scalar]] = []

        for sheet_row in sheet_rows:
            sheet_cells = [x for x in sheet_row.childNodes if x.qname in cell_names]
            empty_cells = 0
            table_row: list[Scalar] = []

            for sheet_cell in sheet_cells:
                if sheet_cell.qname == table_cell_name:
                    value = self._get_cell_value(sheet_cell, convert_float)
                else:
                    value = self.empty_value

                column_repeat = self._get_column_repeat(sheet_cell)

                # Queue up empty values, writing only if content succeeds them
                if value == self.empty_value:
                    empty_cells += column_repeat
                else:
                    table_row.extend([self.empty_value] * empty_cells)
                    empty_cells = 0
                    table_row.extend([value] * column_repeat)

            if max_row_len < len(table_row):
                max_row_len = len(table_row)

            row_repeat = self._get_row_repeat(sheet_row)
            if self._is_empty_row(sheet_row):
                empty_rows += row_repeat
            else:
                # add blank rows to our table
                table.extend([[self.empty_value]] * empty_rows)
                empty_rows = 0
                for _ in range(row_repeat):
                    table.append(table_row)

        # Make our table square
        for row in table:
            if len(row) < max_row_len:
                row.extend([self.empty_value] * (max_row_len - len(row)))

        return table

    def _get_row_repeat(self, row) -> int:
        """
        Return number of times this row was repeated
        Repeating an empty row appeared to be a common way
        of representing sparse rows in the table.
        """
        from odf.namespaces import TABLENS

        return int(row.attributes.get((TABLENS, "number-rows-repeated"), 1))

    def _get_column_repeat(self, cell) -> int:
        from odf.namespaces import TABLENS

        return int(cell.attributes.get((TABLENS, "number-columns-repeated"), 1))

    def _is_empty_row(self, row) -> bool:
        """
        Helper function to find empty rows
        """
        for column in row.childNodes:
            if len(column.childNodes) > 0:
                return False

        return True

    def _get_cell_value(self, cell, convert_float: bool) -> Scalar:
        from odf.namespaces import OFFICENS

        if str(cell) == "#N/A":
            return np.nan

        cell_type = cell.attributes.get((OFFICENS, "value-type"))
        if cell_type == "boolean":
            if str(cell) == "TRUE":
                return True
            return False
        if cell_type is None:
            return self.empty_value
        elif cell_type == "float":
            # GH5394
            cell_value = float(cell.attributes.get((OFFICENS, "value")))
            if convert_float:
                val = int(cell_value)
                if val == cell_value:
                    return val
            return cell_value
        elif cell_type == "percentage":
            cell_value = cell.attributes.get((OFFICENS, "value"))
            return float(cell_value)
        elif cell_type == "string":
            return self._get_cell_string_value(cell)
        elif cell_type == "currency":
            cell_value = cell.attributes.get((OFFICENS, "value"))
            return float(cell_value)
        elif cell_type == "date":
            cell_value = cell.attributes.get((OFFICENS, "date-value"))
            return pd.to_datetime(cell_value)
        elif cell_type == "time":
            stamp = pd.to_datetime(str(cell))
            # error: Item "str" of "Union[float, str, NaTType]" has no attribute "time"
            return stamp.time()  # type: ignore[union-attr]
        else:
            self.close()
            raise ValueError(f"Unrecognized type {cell_type}")

    def _get_cell_string_value(self, cell) -> str:
        """
        Find and decode OpenDocument text:s tags that represent
        a run length encoded sequence of space characters.
        """
        from odf.element import Element
        from odf.namespaces import TEXTNS
        from odf.text import S

        text_s = S().qname

        value = []

        for fragment in cell.childNodes:
            if isinstance(fragment, Element):
                if fragment.qname == text_s:
                    spaces = int(fragment.attributes.get((TEXTNS, "c"), 1))
                    value.append(" " * spaces)
                else:
                    # recursive impl needed in case of nested fragments
                    # with multiple spaces
                    # https://github.com/pandas-dev/pandas/pull/36175#discussion_r484639704
                    value.append(self._get_cell_string_value(fragment))
            else:
                value.append(str(fragment))
        return "".join(value)
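For context, ODFReader is the engine behind read_excel(..., engine="odf"). A minimal sketch, assuming the optional odfpy dependency is installed and "data.ods" is an illustrative file name:

import pandas as pd

# Selecting engine="odf" routes the parse through ODFReader.get_sheet_data above.
df = pd.read_excel("data.ods", sheet_name=0, engine="odf")
print(df.head())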
.venv/Lib/site-packages/pandas/io/excel/_odswriter.py (new file, 303 lines)
@@ -0,0 +1,303 @@
from __future__ import annotations

from collections import defaultdict
import datetime
from typing import (
    Any,
    DefaultDict,
)

import pandas._libs.json as json
from pandas._typing import StorageOptions

from pandas.io.excel._base import ExcelWriter
from pandas.io.excel._util import (
    combine_kwargs,
    validate_freeze_panes,
)
from pandas.io.formats.excel import ExcelCell


class ODSWriter(ExcelWriter):
    engine = "odf"
    supported_extensions = (".ods",)

    def __init__(
        self,
        path: str,
        engine: str | None = None,
        date_format=None,
        datetime_format=None,
        mode: str = "w",
        storage_options: StorageOptions = None,
        if_sheet_exists: str | None = None,
        engine_kwargs: dict[str, Any] | None = None,
        **kwargs,
    ):
        from odf.opendocument import OpenDocumentSpreadsheet

        if mode == "a":
            raise ValueError("Append mode is not supported with odf!")

        super().__init__(
            path,
            mode=mode,
            storage_options=storage_options,
            if_sheet_exists=if_sheet_exists,
            engine_kwargs=engine_kwargs,
        )

        engine_kwargs = combine_kwargs(engine_kwargs, kwargs)

        self.book = OpenDocumentSpreadsheet(**engine_kwargs)
        self._style_dict: dict[str, str] = {}

    def save(self) -> None:
        """
        Save workbook to disk.
        """
        for sheet in self.sheets.values():
            self.book.spreadsheet.addElement(sheet)
        self.book.save(self.handles.handle)

    def write_cells(
        self,
        cells: list[ExcelCell],
        sheet_name: str | None = None,
        startrow: int = 0,
        startcol: int = 0,
        freeze_panes: tuple[int, int] | None = None,
    ) -> None:
        """
        Write the frame cells using odf
        """
        from odf.table import (
            Table,
            TableCell,
            TableRow,
        )
        from odf.text import P

        sheet_name = self._get_sheet_name(sheet_name)
        assert sheet_name is not None

        if sheet_name in self.sheets:
            wks = self.sheets[sheet_name]
        else:
            wks = Table(name=sheet_name)
            self.sheets[sheet_name] = wks

        if validate_freeze_panes(freeze_panes):
            assert freeze_panes is not None
            self._create_freeze_panes(sheet_name, freeze_panes)

        for _ in range(startrow):
            wks.addElement(TableRow())

        rows: DefaultDict = defaultdict(TableRow)
        col_count: DefaultDict = defaultdict(int)

        for cell in sorted(cells, key=lambda cell: (cell.row, cell.col)):
            # only add empty cells if the row is still empty
            if not col_count[cell.row]:
                for _ in range(startcol):
                    rows[cell.row].addElement(TableCell())

            # fill with empty cells if needed
            for _ in range(cell.col - col_count[cell.row]):
                rows[cell.row].addElement(TableCell())
                col_count[cell.row] += 1

            pvalue, tc = self._make_table_cell(cell)
            rows[cell.row].addElement(tc)
            col_count[cell.row] += 1
            p = P(text=pvalue)
            tc.addElement(p)

        # add all rows to the sheet
        for row_nr in range(max(rows.keys()) + 1):
            wks.addElement(rows[row_nr])

    def _make_table_cell_attributes(self, cell) -> dict[str, int | str]:
        """Convert cell attributes to OpenDocument attributes

        Parameters
        ----------
        cell : ExcelCell
            Spreadsheet cell data

        Returns
        -------
        attributes : Dict[str, Union[int, str]]
            Dictionary with attributes and attribute values
        """
        attributes: dict[str, int | str] = {}
        style_name = self._process_style(cell.style)
        if style_name is not None:
            attributes["stylename"] = style_name
        if cell.mergestart is not None and cell.mergeend is not None:
            attributes["numberrowsspanned"] = max(1, cell.mergestart)
            attributes["numbercolumnsspanned"] = cell.mergeend
        return attributes

    def _make_table_cell(self, cell) -> tuple[object, Any]:
        """Convert cell data to an OpenDocument spreadsheet cell

        Parameters
        ----------
        cell : ExcelCell
            Spreadsheet cell data

        Returns
        -------
        pvalue, cell : Tuple[str, TableCell]
            Display value, Cell value
        """
        from odf.table import TableCell

        attributes = self._make_table_cell_attributes(cell)
        val, fmt = self._value_with_fmt(cell.val)
        pvalue = value = val
        if isinstance(val, bool):
            value = str(val).lower()
            pvalue = str(val).upper()
        if isinstance(val, datetime.datetime):
            value = val.isoformat()
            pvalue = val.strftime("%c")
            return (
                pvalue,
                TableCell(valuetype="date", datevalue=value, attributes=attributes),
            )
        elif isinstance(val, datetime.date):
            value = val.strftime("%Y-%m-%d")
            pvalue = val.strftime("%x")
            return (
                pvalue,
                TableCell(valuetype="date", datevalue=value, attributes=attributes),
            )
        else:
            class_to_cell_type = {
                str: "string",
                int: "float",
                float: "float",
                bool: "boolean",
            }
            return (
                pvalue,
                TableCell(
                    valuetype=class_to_cell_type[type(val)],
                    value=value,
                    attributes=attributes,
                ),
            )

    def _process_style(self, style: dict[str, Any]) -> str:
        """Convert a style dictionary to a OpenDocument style sheet

        Parameters
        ----------
        style : Dict
            Style dictionary

        Returns
        -------
        style_key : str
            Unique style key for later reference in sheet
        """
        from odf.style import (
            ParagraphProperties,
            Style,
            TableCellProperties,
            TextProperties,
        )

        if style is None:
            return None
        style_key = json.dumps(style)
        if style_key in self._style_dict:
            return self._style_dict[style_key]
        name = f"pd{len(self._style_dict)+1}"
        self._style_dict[style_key] = name
        odf_style = Style(name=name, family="table-cell")
        if "font" in style:
            font = style["font"]
            if font.get("bold", False):
                odf_style.addElement(TextProperties(fontweight="bold"))
        if "borders" in style:
            borders = style["borders"]
            for side, thickness in borders.items():
                thickness_translation = {"thin": "0.75pt solid #000000"}
                odf_style.addElement(
                    TableCellProperties(
                        attributes={f"border{side}": thickness_translation[thickness]}
                    )
                )
        if "alignment" in style:
            alignment = style["alignment"]
            horizontal = alignment.get("horizontal")
            if horizontal:
                odf_style.addElement(ParagraphProperties(textalign=horizontal))
            vertical = alignment.get("vertical")
            if vertical:
                odf_style.addElement(TableCellProperties(verticalalign=vertical))
        self.book.styles.addElement(odf_style)
        return name

    def _create_freeze_panes(
        self, sheet_name: str, freeze_panes: tuple[int, int]
    ) -> None:
        """
        Create freeze panes in the sheet.

        Parameters
        ----------
        sheet_name : str
            Name of the spreadsheet
        freeze_panes : tuple of (int, int)
            Freeze pane location x and y
        """
        from odf.config import (
            ConfigItem,
            ConfigItemMapEntry,
            ConfigItemMapIndexed,
            ConfigItemMapNamed,
            ConfigItemSet,
        )

        config_item_set = ConfigItemSet(name="ooo:view-settings")
        self.book.settings.addElement(config_item_set)

        config_item_map_indexed = ConfigItemMapIndexed(name="Views")
        config_item_set.addElement(config_item_map_indexed)

        config_item_map_entry = ConfigItemMapEntry()
        config_item_map_indexed.addElement(config_item_map_entry)

        config_item_map_named = ConfigItemMapNamed(name="Tables")
        config_item_map_entry.addElement(config_item_map_named)

        config_item_map_entry = ConfigItemMapEntry(name=sheet_name)
        config_item_map_named.addElement(config_item_map_entry)

        config_item_map_entry.addElement(
            ConfigItem(name="HorizontalSplitMode", type="short", text="2")
        )
        config_item_map_entry.addElement(
            ConfigItem(name="VerticalSplitMode", type="short", text="2")
        )
        config_item_map_entry.addElement(
            ConfigItem(
                name="HorizontalSplitPosition", type="int", text=str(freeze_panes[0])
            )
        )
        config_item_map_entry.addElement(
            ConfigItem(
                name="VerticalSplitPosition", type="int", text=str(freeze_panes[1])
            )
        )
        config_item_map_entry.addElement(
            ConfigItem(name="PositionRight", type="int", text=str(freeze_panes[0]))
        )
        config_item_map_entry.addElement(
            ConfigItem(name="PositionBottom", type="int", text=str(freeze_panes[1]))
        )
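ODSWriter above is reached through to_excel(..., engine="odf"); passing freeze_panes exercises _create_freeze_panes. A small illustrative sketch (file name and data are made up):

import pandas as pd

df = pd.DataFrame({"name": ["a", "b"], "value": [1.5, 2.5]})
# Write mode only; append mode raises ValueError in ODSWriter.__init__ above.
df.to_excel("out.ods", engine="odf", freeze_panes=(1, 1))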
.venv/Lib/site-packages/pandas/io/excel/_openpyxl.py (new file, 597 lines)
@@ -0,0 +1,597 @@
from __future__ import annotations

import mmap
from typing import (
    TYPE_CHECKING,
    Any,
)

import numpy as np

from pandas._typing import (
    FilePath,
    ReadBuffer,
    Scalar,
    StorageOptions,
)
from pandas.compat._optional import import_optional_dependency

from pandas.io.excel._base import (
    BaseExcelReader,
    ExcelWriter,
)
from pandas.io.excel._util import (
    combine_kwargs,
    validate_freeze_panes,
)

if TYPE_CHECKING:
    from openpyxl.descriptors.serialisable import Serialisable


class OpenpyxlWriter(ExcelWriter):
    engine = "openpyxl"
    supported_extensions = (".xlsx", ".xlsm")

    def __init__(
        self,
        path,
        engine=None,
        date_format=None,
        datetime_format=None,
        mode: str = "w",
        storage_options: StorageOptions = None,
        if_sheet_exists: str | None = None,
        engine_kwargs: dict[str, Any] | None = None,
        **kwargs,
    ):
        # Use the openpyxl module as the Excel writer.
        from openpyxl.workbook import Workbook

        engine_kwargs = combine_kwargs(engine_kwargs, kwargs)

        super().__init__(
            path,
            mode=mode,
            storage_options=storage_options,
            if_sheet_exists=if_sheet_exists,
            engine_kwargs=engine_kwargs,
        )

        # ExcelWriter replaced "a" by "r+" to allow us to first read the excel file from
        # the file and later write to it
        if "r+" in self.mode:  # Load from existing workbook
            from openpyxl import load_workbook

            self.book = load_workbook(self.handles.handle, **engine_kwargs)
            self.handles.handle.seek(0)
            self.sheets = {name: self.book[name] for name in self.book.sheetnames}

        else:
            # Create workbook object with default optimized_write=True.
            self.book = Workbook(**engine_kwargs)

            if self.book.worksheets:
                self.book.remove(self.book.worksheets[0])

    def save(self):
        """
        Save workbook to disk.
        """
        self.book.save(self.handles.handle)
        if "r+" in self.mode and not isinstance(self.handles.handle, mmap.mmap):
            # truncate file to the written content
            self.handles.handle.truncate()

    @classmethod
    def _convert_to_style_kwargs(cls, style_dict: dict) -> dict[str, Serialisable]:
        """
        Convert a style_dict to a set of kwargs suitable for initializing
        or updating-on-copy an openpyxl v2 style object.

        Parameters
        ----------
        style_dict : dict
            A dict with zero or more of the following keys (or their synonyms).
                'font'
                'fill'
                'border' ('borders')
                'alignment'
                'number_format'
                'protection'

        Returns
        -------
        style_kwargs : dict
            A dict with the same, normalized keys as ``style_dict`` but each
            value has been replaced with a native openpyxl style object of the
            appropriate class.
        """
        _style_key_map = {"borders": "border"}

        style_kwargs: dict[str, Serialisable] = {}
        for k, v in style_dict.items():
            if k in _style_key_map:
                k = _style_key_map[k]
            _conv_to_x = getattr(cls, f"_convert_to_{k}", lambda x: None)
            new_v = _conv_to_x(v)
            if new_v:
                style_kwargs[k] = new_v

        return style_kwargs

    @classmethod
    def _convert_to_color(cls, color_spec):
        """
        Convert ``color_spec`` to an openpyxl v2 Color object.

        Parameters
        ----------
        color_spec : str, dict
            A 32-bit ARGB hex string, or a dict with zero or more of the
            following keys.
                'rgb'
                'indexed'
                'auto'
                'theme'
                'tint'
                'index'
                'type'

        Returns
        -------
        color : openpyxl.styles.Color
        """
        from openpyxl.styles import Color

        if isinstance(color_spec, str):
            return Color(color_spec)
        else:
            return Color(**color_spec)

    @classmethod
    def _convert_to_font(cls, font_dict):
        """
        Convert ``font_dict`` to an openpyxl v2 Font object.

        Parameters
        ----------
        font_dict : dict
            A dict with zero or more of the following keys (or their synonyms).
                'name'
                'size' ('sz')
                'bold' ('b')
                'italic' ('i')
                'underline' ('u')
                'strikethrough' ('strike')
                'color'
                'vertAlign' ('vertalign')
                'charset'
                'scheme'
                'family'
                'outline'
                'shadow'
                'condense'

        Returns
        -------
        font : openpyxl.styles.Font
        """
        from openpyxl.styles import Font

        _font_key_map = {
            "sz": "size",
            "b": "bold",
            "i": "italic",
            "u": "underline",
            "strike": "strikethrough",
            "vertalign": "vertAlign",
        }

        font_kwargs = {}
        for k, v in font_dict.items():
            if k in _font_key_map:
                k = _font_key_map[k]
            if k == "color":
                v = cls._convert_to_color(v)
            font_kwargs[k] = v

        return Font(**font_kwargs)

    @classmethod
    def _convert_to_stop(cls, stop_seq):
        """
        Convert ``stop_seq`` to a list of openpyxl v2 Color objects,
        suitable for initializing the ``GradientFill`` ``stop`` parameter.

        Parameters
        ----------
        stop_seq : iterable
            An iterable that yields objects suitable for consumption by
            ``_convert_to_color``.

        Returns
        -------
        stop : list of openpyxl.styles.Color
        """
        return map(cls._convert_to_color, stop_seq)

    @classmethod
    def _convert_to_fill(cls, fill_dict):
        """
        Convert ``fill_dict`` to an openpyxl v2 Fill object.

        Parameters
        ----------
        fill_dict : dict
            A dict with one or more of the following keys (or their synonyms),
                'fill_type' ('patternType', 'patterntype')
                'start_color' ('fgColor', 'fgcolor')
                'end_color' ('bgColor', 'bgcolor')
            or one or more of the following keys (or their synonyms).
                'type' ('fill_type')
                'degree'
                'left'
                'right'
                'top'
                'bottom'
                'stop'

        Returns
        -------
        fill : openpyxl.styles.Fill
        """
        from openpyxl.styles import (
            GradientFill,
            PatternFill,
        )

        _pattern_fill_key_map = {
            "patternType": "fill_type",
            "patterntype": "fill_type",
            "fgColor": "start_color",
            "fgcolor": "start_color",
            "bgColor": "end_color",
            "bgcolor": "end_color",
        }

        _gradient_fill_key_map = {"fill_type": "type"}

        pfill_kwargs = {}
        gfill_kwargs = {}
        for k, v in fill_dict.items():
            pk = gk = None
            if k in _pattern_fill_key_map:
                pk = _pattern_fill_key_map[k]
            if k in _gradient_fill_key_map:
                gk = _gradient_fill_key_map[k]
            if pk in ["start_color", "end_color"]:
                v = cls._convert_to_color(v)
            if gk == "stop":
                v = cls._convert_to_stop(v)
            if pk:
                pfill_kwargs[pk] = v
            elif gk:
                gfill_kwargs[gk] = v
            else:
                pfill_kwargs[k] = v
                gfill_kwargs[k] = v

        try:
            return PatternFill(**pfill_kwargs)
        except TypeError:
            return GradientFill(**gfill_kwargs)

    @classmethod
    def _convert_to_side(cls, side_spec):
        """
        Convert ``side_spec`` to an openpyxl v2 Side object.

        Parameters
        ----------
        side_spec : str, dict
            A string specifying the border style, or a dict with zero or more
            of the following keys (or their synonyms).
                'style' ('border_style')
                'color'

        Returns
        -------
        side : openpyxl.styles.Side
        """
        from openpyxl.styles import Side

        _side_key_map = {"border_style": "style"}

        if isinstance(side_spec, str):
            return Side(style=side_spec)

        side_kwargs = {}
        for k, v in side_spec.items():
            if k in _side_key_map:
                k = _side_key_map[k]
            if k == "color":
                v = cls._convert_to_color(v)
            side_kwargs[k] = v

        return Side(**side_kwargs)

    @classmethod
    def _convert_to_border(cls, border_dict):
        """
        Convert ``border_dict`` to an openpyxl v2 Border object.

        Parameters
        ----------
        border_dict : dict
            A dict with zero or more of the following keys (or their synonyms).
                'left'
                'right'
                'top'
                'bottom'
                'diagonal'
                'diagonal_direction'
                'vertical'
                'horizontal'
                'diagonalUp' ('diagonalup')
                'diagonalDown' ('diagonaldown')
                'outline'

        Returns
        -------
        border : openpyxl.styles.Border
        """
        from openpyxl.styles import Border

        _border_key_map = {"diagonalup": "diagonalUp", "diagonaldown": "diagonalDown"}

        border_kwargs = {}
        for k, v in border_dict.items():
            if k in _border_key_map:
                k = _border_key_map[k]
            if k == "color":
                v = cls._convert_to_color(v)
            if k in ["left", "right", "top", "bottom", "diagonal"]:
                v = cls._convert_to_side(v)
            border_kwargs[k] = v

        return Border(**border_kwargs)

    @classmethod
    def _convert_to_alignment(cls, alignment_dict):
        """
        Convert ``alignment_dict`` to an openpyxl v2 Alignment object.

        Parameters
        ----------
        alignment_dict : dict
            A dict with zero or more of the following keys (or their synonyms).
                'horizontal'
                'vertical'
                'text_rotation'
                'wrap_text'
                'shrink_to_fit'
                'indent'
        Returns
        -------
        alignment : openpyxl.styles.Alignment
        """
        from openpyxl.styles import Alignment

        return Alignment(**alignment_dict)

    @classmethod
    def _convert_to_number_format(cls, number_format_dict):
        """
        Convert ``number_format_dict`` to an openpyxl v2.1.0 number format
        initializer.

        Parameters
        ----------
        number_format_dict : dict
            A dict with zero or more of the following keys.
                'format_code' : str

        Returns
        -------
        number_format : str
        """
        return number_format_dict["format_code"]

    @classmethod
    def _convert_to_protection(cls, protection_dict):
        """
        Convert ``protection_dict`` to an openpyxl v2 Protection object.

        Parameters
        ----------
        protection_dict : dict
            A dict with zero or more of the following keys.
                'locked'
                'hidden'

        Returns
        -------
        """
        from openpyxl.styles import Protection

        return Protection(**protection_dict)

    def write_cells(
        self, cells, sheet_name=None, startrow=0, startcol=0, freeze_panes=None
    ):
        # Write the frame cells using openpyxl.
        sheet_name = self._get_sheet_name(sheet_name)

        _style_cache: dict[str, dict[str, Serialisable]] = {}

        if sheet_name in self.sheets and self.if_sheet_exists != "new":
            if "r+" in self.mode:
                if self.if_sheet_exists == "replace":
                    old_wks = self.sheets[sheet_name]
                    target_index = self.book.index(old_wks)
                    del self.book[sheet_name]
                    wks = self.book.create_sheet(sheet_name, target_index)
                    self.sheets[sheet_name] = wks
                elif self.if_sheet_exists == "error":
                    raise ValueError(
                        f"Sheet '{sheet_name}' already exists and "
                        f"if_sheet_exists is set to 'error'."
                    )
                elif self.if_sheet_exists == "overlay":
                    wks = self.sheets[sheet_name]
                else:
                    raise ValueError(
                        f"'{self.if_sheet_exists}' is not valid for if_sheet_exists. "
                        "Valid options are 'error', 'new', 'replace' and 'overlay'."
                    )
            else:
                wks = self.sheets[sheet_name]
        else:
            wks = self.book.create_sheet()
            wks.title = sheet_name
            self.sheets[sheet_name] = wks

        if validate_freeze_panes(freeze_panes):
            wks.freeze_panes = wks.cell(
                row=freeze_panes[0] + 1, column=freeze_panes[1] + 1
            )

        for cell in cells:
            xcell = wks.cell(
                row=startrow + cell.row + 1, column=startcol + cell.col + 1
            )
            xcell.value, fmt = self._value_with_fmt(cell.val)
            if fmt:
                xcell.number_format = fmt

            style_kwargs: dict[str, Serialisable] | None = {}
            if cell.style:
                key = str(cell.style)
                style_kwargs = _style_cache.get(key)
                if style_kwargs is None:
                    style_kwargs = self._convert_to_style_kwargs(cell.style)
                    _style_cache[key] = style_kwargs

            if style_kwargs:
                for k, v in style_kwargs.items():
                    setattr(xcell, k, v)

            if cell.mergestart is not None and cell.mergeend is not None:

                wks.merge_cells(
                    start_row=startrow + cell.row + 1,
                    start_column=startcol + cell.col + 1,
                    end_column=startcol + cell.mergeend + 1,
                    end_row=startrow + cell.mergestart + 1,
                )

                # When cells are merged only the top-left cell is preserved
                # The behaviour of the other cells in a merged range is
                # undefined
                if style_kwargs:
                    first_row = startrow + cell.row + 1
                    last_row = startrow + cell.mergestart + 1
                    first_col = startcol + cell.col + 1
                    last_col = startcol + cell.mergeend + 1

                    for row in range(first_row, last_row + 1):
                        for col in range(first_col, last_col + 1):
                            if row == first_row and col == first_col:
                                # Ignore first cell. It is already handled.
                                continue
                            xcell = wks.cell(column=col, row=row)
                            for k, v in style_kwargs.items():
                                setattr(xcell, k, v)


class OpenpyxlReader(BaseExcelReader):
    def __init__(
        self,
        filepath_or_buffer: FilePath | ReadBuffer[bytes],
        storage_options: StorageOptions = None,
    ) -> None:
        """
        Reader using openpyxl engine.

        Parameters
        ----------
        filepath_or_buffer : str, path object or Workbook
            Object to be parsed.
        storage_options : dict, optional
            passed to fsspec for appropriate URLs (see ``_get_filepath_or_buffer``)
        """
        import_optional_dependency("openpyxl")
        super().__init__(filepath_or_buffer, storage_options=storage_options)

    @property
    def _workbook_class(self):
        from openpyxl import Workbook

        return Workbook

    def load_workbook(self, filepath_or_buffer: FilePath | ReadBuffer[bytes]):
        from openpyxl import load_workbook

        return load_workbook(
            filepath_or_buffer, read_only=True, data_only=True, keep_links=False
        )

    @property
    def sheet_names(self) -> list[str]:
        return [sheet.title for sheet in self.book.worksheets]

    def get_sheet_by_name(self, name: str):
        self.raise_if_bad_sheet_by_name(name)
        return self.book[name]

    def get_sheet_by_index(self, index: int):
        self.raise_if_bad_sheet_by_index(index)
        return self.book.worksheets[index]

    def _convert_cell(self, cell, convert_float: bool) -> Scalar:

        from openpyxl.cell.cell import (
            TYPE_ERROR,
            TYPE_NUMERIC,
        )

        if cell.value is None:
            return ""  # compat with xlrd
        elif cell.data_type == TYPE_ERROR:
            return np.nan
        elif not convert_float and cell.data_type == TYPE_NUMERIC:
            return float(cell.value)

        return cell.value

    def get_sheet_data(self, sheet, convert_float: bool) -> list[list[Scalar]]:

        if self.book.read_only:
            sheet.reset_dimensions()

        data: list[list[Scalar]] = []
        last_row_with_data = -1
        for row_number, row in enumerate(sheet.rows):
            converted_row = [self._convert_cell(cell, convert_float) for cell in row]
            while converted_row and converted_row[-1] == "":
                # trim trailing empty elements
                converted_row.pop()
            if converted_row:
                last_row_with_data = row_number
            data.append(converted_row)

        # Trim trailing empty rows
        data = data[: last_row_with_data + 1]

        if len(data) > 0:
            # extend rows to max width
            max_width = max(len(data_row) for data_row in data)
            if min(len(data_row) for data_row in data) < max_width:
                empty_cell: list[Scalar] = [""]
                data = [
                    data_row + (max_width - len(data_row)) * empty_cell
                    for data_row in data
                ]

        return data
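OpenpyxlWriter's append path (mode "a" is mapped to "r+") and the if_sheet_exists branches in write_cells can be exercised like this; a sketch assuming openpyxl is installed and "report.xlsx" is an illustrative file name:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})
df.to_excel("report.xlsx", sheet_name="data", index=False)

# Reopening in append mode loads the existing workbook; "replace" takes the
# write_cells branch that deletes and recreates the sheet at the same index.
with pd.ExcelWriter("report.xlsx", mode="a", if_sheet_exists="replace") as writer:
    df.to_excel(writer, sheet_name="data", index=False)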
.venv/Lib/site-packages/pandas/io/excel/_pyxlsb.py (new file, 103 lines)
@@ -0,0 +1,103 @@
# pyright: reportMissingImports=false
from __future__ import annotations

from pandas._typing import (
    FilePath,
    ReadBuffer,
    Scalar,
    StorageOptions,
)
from pandas.compat._optional import import_optional_dependency

from pandas.io.excel._base import BaseExcelReader


class PyxlsbReader(BaseExcelReader):
    def __init__(
        self,
        filepath_or_buffer: FilePath | ReadBuffer[bytes],
        storage_options: StorageOptions = None,
    ):
        """
        Reader using pyxlsb engine.

        Parameters
        ----------
        filepath_or_buffer : str, path object, or Workbook
            Object to be parsed.
        storage_options : dict, optional
            passed to fsspec for appropriate URLs (see ``_get_filepath_or_buffer``)
        """
        import_optional_dependency("pyxlsb")
        # This will call load_workbook on the filepath or buffer
        # And set the result to the book-attribute
        super().__init__(filepath_or_buffer, storage_options=storage_options)

    @property
    def _workbook_class(self):
        from pyxlsb import Workbook

        return Workbook

    def load_workbook(self, filepath_or_buffer: FilePath | ReadBuffer[bytes]):
        from pyxlsb import open_workbook

        # TODO: hack in buffer capability
        # This might need some modifications to the Pyxlsb library
        # Actual work for opening it is in xlsbpackage.py, line 20-ish

        return open_workbook(filepath_or_buffer)

    @property
    def sheet_names(self) -> list[str]:
        return self.book.sheets

    def get_sheet_by_name(self, name: str):
        self.raise_if_bad_sheet_by_name(name)
        return self.book.get_sheet(name)

    def get_sheet_by_index(self, index: int):
        self.raise_if_bad_sheet_by_index(index)
        # pyxlsb sheets are indexed from 1 onwards
        # There's a fix for this in the source, but the pypi package doesn't have it
        return self.book.get_sheet(index + 1)

    def _convert_cell(self, cell, convert_float: bool) -> Scalar:
        # TODO: there is no way to distinguish between floats and datetimes in pyxlsb
        # This means that there is no way to read datetime types from an xlsb file yet
        if cell.v is None:
            return ""  # Prevents non-named columns from not showing up as Unnamed: i
        if isinstance(cell.v, float) and convert_float:
            val = int(cell.v)
            if val == cell.v:
                return val
            else:
                return float(cell.v)

        return cell.v

    def get_sheet_data(self, sheet, convert_float: bool) -> list[list[Scalar]]:
        data: list[list[Scalar]] = []
        prevous_row_number = -1
        # When sparse=True the rows can have different lengths and empty rows are
        # not returned. The cells are namedtuples of row, col, value (r, c, v).
        for row in sheet.rows(sparse=True):
            row_number = row[0].r
            converted_row = [self._convert_cell(cell, convert_float) for cell in row]
            while converted_row and converted_row[-1] == "":
                # trim trailing empty elements
                converted_row.pop()
            if converted_row:
                data.extend([[]] * (row_number - prevous_row_number - 1))
                data.append(converted_row)
                prevous_row_number = row_number
        if data:
            # extend rows to max_width
            max_width = max(len(data_row) for data_row in data)
            if min(len(data_row) for data_row in data) < max_width:
                empty_cell: list[Scalar] = [""]
                data = [
                    data_row + (max_width - len(data_row)) * empty_cell
                    for data_row in data
                ]
        return data
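PyxlsbReader backs read_excel(..., engine="pyxlsb") for binary .xlsb workbooks; note the comment above that datetimes cannot be distinguished from floats. An illustrative sketch, assuming pyxlsb is installed and the file name is made up:

import pandas as pd

df = pd.read_excel("workbook.xlsb", engine="pyxlsb")
print(df.dtypes)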
.venv/Lib/site-packages/pandas/io/excel/_util.py (new file, 335 lines)
@@ -0,0 +1,335 @@
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Hashable,
    Iterable,
    Literal,
    MutableMapping,
    Sequence,
    TypeVar,
    overload,
)

from pandas.compat._optional import import_optional_dependency

from pandas.core.dtypes.common import (
    is_integer,
    is_list_like,
)

if TYPE_CHECKING:
    from pandas.io.excel._base import ExcelWriter

    ExcelWriter_t = type[ExcelWriter]
    usecols_func = TypeVar("usecols_func", bound=Callable[[Hashable], object])

_writers: MutableMapping[str, ExcelWriter_t] = {}


def register_writer(klass: ExcelWriter_t) -> None:
    """
    Add engine to the excel writer registry.io.excel.

    You must use this method to integrate with ``to_excel``.

    Parameters
    ----------
    klass : ExcelWriter
    """
    if not callable(klass):
        raise ValueError("Can only register callables as engines")
    engine_name = klass.engine
    # for mypy
    assert isinstance(engine_name, str)
    _writers[engine_name] = klass


def get_default_engine(ext: str, mode: Literal["reader", "writer"] = "reader") -> str:
    """
    Return the default reader/writer for the given extension.

    Parameters
    ----------
    ext : str
        The excel file extension for which to get the default engine.
    mode : str {'reader', 'writer'}
        Whether to get the default engine for reading or writing.
        Either 'reader' or 'writer'

    Returns
    -------
    str
        The default engine for the extension.
    """
    _default_readers = {
        "xlsx": "openpyxl",
        "xlsm": "openpyxl",
        "xlsb": "pyxlsb",
        "xls": "xlrd",
        "ods": "odf",
    }
    _default_writers = {
        "xlsx": "openpyxl",
        "xlsm": "openpyxl",
        "xlsb": "pyxlsb",
        "xls": "xlwt",
        "ods": "odf",
    }
    assert mode in ["reader", "writer"]
    if mode == "writer":
        # Prefer xlsxwriter over openpyxl if installed
        xlsxwriter = import_optional_dependency("xlsxwriter", errors="warn")
        if xlsxwriter:
            _default_writers["xlsx"] = "xlsxwriter"
        return _default_writers[ext]
    else:
        return _default_readers[ext]


def get_writer(engine_name: str) -> ExcelWriter_t:
    try:
        return _writers[engine_name]
    except KeyError as err:
        raise ValueError(f"No Excel writer '{engine_name}'") from err


def _excel2num(x: str) -> int:
    """
    Convert Excel column name like 'AB' to 0-based column index.

    Parameters
    ----------
    x : str
        The Excel column name to convert to a 0-based column index.

    Returns
    -------
    num : int
        The column index corresponding to the name.

    Raises
    ------
    ValueError
        Part of the Excel column name was invalid.
    """
    index = 0

    for c in x.upper().strip():
        cp = ord(c)

        if cp < ord("A") or cp > ord("Z"):
            raise ValueError(f"Invalid column name: {x}")

        index = index * 26 + cp - ord("A") + 1

    return index - 1


def _range2cols(areas: str) -> list[int]:
    """
    Convert comma separated list of column names and ranges to indices.

    Parameters
    ----------
    areas : str
        A string containing a sequence of column ranges (or areas).

    Returns
    -------
    cols : list
        A list of 0-based column indices.

    Examples
    --------
    >>> _range2cols('A:E')
    [0, 1, 2, 3, 4]
    >>> _range2cols('A,C,Z:AB')
    [0, 2, 25, 26, 27]
    """
    cols: list[int] = []

    for rng in areas.split(","):
        if ":" in rng:
            rngs = rng.split(":")
            cols.extend(range(_excel2num(rngs[0]), _excel2num(rngs[1]) + 1))
        else:
            cols.append(_excel2num(rng))

    return cols


@overload
def maybe_convert_usecols(usecols: str | list[int]) -> list[int]:
    ...


@overload
def maybe_convert_usecols(usecols: list[str]) -> list[str]:
    ...


@overload
def maybe_convert_usecols(usecols: usecols_func) -> usecols_func:
    ...


@overload
def maybe_convert_usecols(usecols: None) -> None:
    ...


def maybe_convert_usecols(
    usecols: str | list[int] | list[str] | usecols_func | None,
) -> None | list[int] | list[str] | usecols_func:
    """
    Convert `usecols` into a compatible format for parsing in `parsers.py`.

    Parameters
    ----------
    usecols : object
        The use-columns object to potentially convert.

    Returns
    -------
    converted : object
        The compatible format of `usecols`.
    """
    if usecols is None:
        return usecols

    if is_integer(usecols):
        raise ValueError(
            "Passing an integer for `usecols` is no longer supported. "
            "Please pass in a list of int from 0 to `usecols` inclusive instead."
        )

    if isinstance(usecols, str):
        return _range2cols(usecols)

    return usecols


@overload
def validate_freeze_panes(freeze_panes: tuple[int, int]) -> Literal[True]:
    ...


@overload
def validate_freeze_panes(freeze_panes: None) -> Literal[False]:
    ...


def validate_freeze_panes(freeze_panes: tuple[int, int] | None) -> bool:
    if freeze_panes is not None:
        if len(freeze_panes) == 2 and all(
            isinstance(item, int) for item in freeze_panes
        ):
            return True

        raise ValueError(
            "freeze_panes must be of form (row, column) "
            "where row and column are integers"
        )

    # freeze_panes wasn't specified, return False so it won't be applied
    # to output sheet
    return False


def fill_mi_header(
    row: list[Hashable], control_row: list[bool]
) -> tuple[list[Hashable], list[bool]]:
    """
    Forward fill blank entries in row but only inside the same parent index.

    Used for creating headers in Multiindex.

    Parameters
    ----------
    row : list
        List of items in a single row.
    control_row : list of bool
        Helps to determine if particular column is in same parent index as the
        previous value. Used to stop propagation of empty cells between
        different indexes.

    Returns
    -------
    Returns changed row and control_row
    """
    last = row[0]
    for i in range(1, len(row)):
        if not control_row[i]:
            last = row[i]

        if row[i] == "" or row[i] is None:
            row[i] = last
        else:
            control_row[i] = False
            last = row[i]

    return row, control_row


def pop_header_name(
    row: list[Hashable], index_col: int | Sequence[int]
) -> tuple[Hashable | None, list[Hashable]]:
    """
    Pop the header name for MultiIndex parsing.

    Parameters
    ----------
    row : list
        The data row to parse for the header name.
    index_col : int, list
        The index columns for our data. Assumed to be non-null.

    Returns
    -------
    header_name : str
        The extracted header name.
    trimmed_row : list
        The original data row with the header name removed.
    """
    # Pop out header name and fill w/blank.
    if is_list_like(index_col):
        assert isinstance(index_col, Iterable)
        i = max(index_col)
    else:
        assert not isinstance(index_col, Iterable)
        i = index_col

    header_name = row[i]
    header_name = None if header_name == "" else header_name

    return header_name, row[:i] + [""] + row[i + 1 :]


def combine_kwargs(engine_kwargs: dict[str, Any] | None, kwargs: dict) -> dict:
    """
    Used to combine two sources of kwargs for the backend engine.

    Use of kwargs is deprecated, this function is solely for use in 1.3 and should
    be removed in 1.4/2.0. Also _base.ExcelWriter.__new__ ensures either engine_kwargs
    or kwargs must be None or empty respectively.

    Parameters
    ----------
    engine_kwargs: dict
        kwargs to be passed through to the engine.
    kwargs: dict
        kwargs to be passed through to the engine (deprecated)

    Returns
    -------
    engine_kwargs combined with kwargs
    """
    if engine_kwargs is None:
        result = {}
    else:
        result = engine_kwargs.copy()
    result.update(kwargs)
    return result
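The column-range helpers above can be exercised directly; the expected outputs follow the docstring examples in the file:

from pandas.io.excel._util import _excel2num, _range2cols, maybe_convert_usecols

print(_excel2num("AB"))               # 27
print(_range2cols("A,C,Z:AB"))        # [0, 2, 25, 26, 27]
print(maybe_convert_usecols("A:E"))   # [0, 1, 2, 3, 4]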
.venv/Lib/site-packages/pandas/io/excel/_xlrd.py (new file, 112 lines)
@@ -0,0 +1,112 @@
from datetime import time

import numpy as np

from pandas._typing import StorageOptions
from pandas.compat._optional import import_optional_dependency

from pandas.io.excel._base import BaseExcelReader


class XlrdReader(BaseExcelReader):
    def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None):
        """
        Reader using xlrd engine.

        Parameters
        ----------
        filepath_or_buffer : str, path object or Workbook
            Object to be parsed.
        storage_options : dict, optional
            passed to fsspec for appropriate URLs (see ``_get_filepath_or_buffer``)
        """
        err_msg = "Install xlrd >= 1.0.0 for Excel support"
        import_optional_dependency("xlrd", extra=err_msg)
        super().__init__(filepath_or_buffer, storage_options=storage_options)

    @property
    def _workbook_class(self):
        from xlrd import Book

        return Book

    def load_workbook(self, filepath_or_buffer):
        from xlrd import open_workbook

        if hasattr(filepath_or_buffer, "read"):
            data = filepath_or_buffer.read()
            return open_workbook(file_contents=data)
        else:
            return open_workbook(filepath_or_buffer)

    @property
    def sheet_names(self):
        return self.book.sheet_names()

    def get_sheet_by_name(self, name):
        self.raise_if_bad_sheet_by_name(name)
        return self.book.sheet_by_name(name)

    def get_sheet_by_index(self, index):
        self.raise_if_bad_sheet_by_index(index)
        return self.book.sheet_by_index(index)

    def get_sheet_data(self, sheet, convert_float):
        from xlrd import (
            XL_CELL_BOOLEAN,
            XL_CELL_DATE,
            XL_CELL_ERROR,
            XL_CELL_NUMBER,
            xldate,
        )

        epoch1904 = self.book.datemode

        def _parse_cell(cell_contents, cell_typ):
            """
            converts the contents of the cell into a pandas appropriate object
            """
            if cell_typ == XL_CELL_DATE:

                # Use the newer xlrd datetime handling.
                try:
                    cell_contents = xldate.xldate_as_datetime(cell_contents, epoch1904)
                except OverflowError:
                    return cell_contents

                # Excel doesn't distinguish between dates and time,
                # so we treat dates on the epoch as times only.
                # Also, Excel supports 1900 and 1904 epochs.
                year = (cell_contents.timetuple())[0:3]
                if (not epoch1904 and year == (1899, 12, 31)) or (
                    epoch1904 and year == (1904, 1, 1)
                ):
                    cell_contents = time(
                        cell_contents.hour,
                        cell_contents.minute,
                        cell_contents.second,
                        cell_contents.microsecond,
                    )

            elif cell_typ == XL_CELL_ERROR:
                cell_contents = np.nan
            elif cell_typ == XL_CELL_BOOLEAN:
                cell_contents = bool(cell_contents)
            elif convert_float and cell_typ == XL_CELL_NUMBER:
                # GH5394 - Excel 'numbers' are always floats
                # it's a minimal perf hit and less surprising
                val = int(cell_contents)
                if val == cell_contents:
                    cell_contents = val
            return cell_contents

        data = []

        for i in range(sheet.nrows):
            row = [
                _parse_cell(value, typ)
                for value, typ in zip(sheet.row_values(i), sheet.row_types(i))
            ]
            data.append(row)

        return data
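XlrdReader handles legacy .xls workbooks; a one-line sketch, assuming xlrd is installed and "legacy.xls" is an illustrative path:

import pandas as pd

df = pd.read_excel("legacy.xls", engine="xlrd")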
250
.venv/Lib/site-packages/pandas/io/excel/_xlsxwriter.py
Normal file
250
.venv/Lib/site-packages/pandas/io/excel/_xlsxwriter.py
Normal file
@ -0,0 +1,250 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
import pandas._libs.json as json
|
||||
from pandas._typing import StorageOptions
|
||||
|
||||
from pandas.io.excel._base import ExcelWriter
|
||||
from pandas.io.excel._util import (
|
||||
combine_kwargs,
|
||||
validate_freeze_panes,
|
||||
)
|
||||
|
||||
|
||||
class _XlsxStyler:
|
||||
# Map from openpyxl-oriented styles to flatter xlsxwriter representation
|
||||
# Ordering necessary for both determinism and because some are keyed by
|
||||
# prefixes of others.
|
||||
STYLE_MAPPING: dict[str, list[tuple[tuple[str, ...], str]]] = {
|
||||
"font": [
|
||||
(("name",), "font_name"),
|
||||
(("sz",), "font_size"),
|
||||
(("size",), "font_size"),
|
||||
(("color", "rgb"), "font_color"),
|
||||
(("color",), "font_color"),
|
||||
(("b",), "bold"),
|
||||
(("bold",), "bold"),
|
||||
(("i",), "italic"),
|
||||
(("italic",), "italic"),
|
||||
(("u",), "underline"),
|
||||
(("underline",), "underline"),
|
||||
(("strike",), "font_strikeout"),
|
||||
(("vertAlign",), "font_script"),
|
||||
(("vertalign",), "font_script"),
|
||||
],
|
||||
"number_format": [(("format_code",), "num_format"), ((), "num_format")],
|
||||
"protection": [(("locked",), "locked"), (("hidden",), "hidden")],
|
||||
"alignment": [
|
||||
(("horizontal",), "align"),
|
||||
(("vertical",), "valign"),
|
||||
(("text_rotation",), "rotation"),
|
||||
(("wrap_text",), "text_wrap"),
|
||||
(("indent",), "indent"),
|
||||
(("shrink_to_fit",), "shrink"),
|
||||
],
|
||||
"fill": [
|
||||
(("patternType",), "pattern"),
|
||||
(("patterntype",), "pattern"),
|
||||
(("fill_type",), "pattern"),
|
||||
(("start_color", "rgb"), "fg_color"),
|
||||
(("fgColor", "rgb"), "fg_color"),
|
||||
(("fgcolor", "rgb"), "fg_color"),
|
||||
(("start_color",), "fg_color"),
|
||||
(("fgColor",), "fg_color"),
|
||||
(("fgcolor",), "fg_color"),
|
||||
(("end_color", "rgb"), "bg_color"),
|
||||
(("bgColor", "rgb"), "bg_color"),
|
||||
(("bgcolor", "rgb"), "bg_color"),
|
||||
(("end_color",), "bg_color"),
|
||||
(("bgColor",), "bg_color"),
|
||||
(("bgcolor",), "bg_color"),
|
||||
],
|
||||
"border": [
|
||||
(("color", "rgb"), "border_color"),
|
||||
(("color",), "border_color"),
|
||||
(("style",), "border"),
|
||||
(("top", "color", "rgb"), "top_color"),
|
||||
(("top", "color"), "top_color"),
|
||||
(("top", "style"), "top"),
|
||||
(("top",), "top"),
|
||||
(("right", "color", "rgb"), "right_color"),
|
||||
(("right", "color"), "right_color"),
|
||||
(("right", "style"), "right"),
|
||||
(("right",), "right"),
|
||||
(("bottom", "color", "rgb"), "bottom_color"),
|
||||
(("bottom", "color"), "bottom_color"),
|
||||
(("bottom", "style"), "bottom"),
|
||||
(("bottom",), "bottom"),
|
||||
(("left", "color", "rgb"), "left_color"),
|
||||
(("left", "color"), "left_color"),
|
||||
(("left", "style"), "left"),
|
||||
(("left",), "left"),
|
||||
],
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def convert(cls, style_dict, num_format_str=None):
|
||||
"""
|
||||
converts a style_dict to an xlsxwriter format dict
|
||||
|
||||
Parameters
|
||||
----------
|
||||
style_dict : style dictionary to convert
|
||||
num_format_str : optional number format string
|
||||
"""
|
||||
# Create a XlsxWriter format object.
|
||||
props = {}
|
||||
|
||||
if num_format_str is not None:
|
||||
props["num_format"] = num_format_str
|
||||
|
||||
if style_dict is None:
|
||||
return props
|
||||
|
||||
if "borders" in style_dict:
|
||||
style_dict = style_dict.copy()
|
||||
style_dict["border"] = style_dict.pop("borders")
|
||||
|
||||
for style_group_key, style_group in style_dict.items():
|
||||
for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):
|
||||
# src is a sequence of keys into a nested dict
|
||||
# dst is a flat key
|
||||
if dst in props:
|
||||
continue
|
||||
v = style_group
|
||||
for k in src:
|
||||
try:
|
||||
v = v[k]
|
||||
except (KeyError, TypeError):
|
||||
break
|
||||
else:
|
||||
props[dst] = v
|
||||
|
||||
if isinstance(props.get("pattern"), str):
|
||||
# TODO: support other fill patterns
|
||||
props["pattern"] = 0 if props["pattern"] == "none" else 1
|
||||
|
||||
for k in ["border", "top", "right", "bottom", "left"]:
|
||||
if isinstance(props.get(k), str):
|
||||
try:
|
||||
props[k] = [
|
||||
"none",
|
||||
"thin",
|
||||
"medium",
|
||||
"dashed",
|
||||
"dotted",
|
||||
"thick",
|
||||
"double",
|
||||
"hair",
|
||||
"mediumDashed",
|
||||
"dashDot",
|
||||
"mediumDashDot",
|
||||
"dashDotDot",
|
||||
"mediumDashDotDot",
|
||||
"slantDashDot",
|
||||
].index(props[k])
|
||||
except ValueError:
|
||||
props[k] = 2
|
||||
|
||||
if isinstance(props.get("font_script"), str):
|
||||
props["font_script"] = ["baseline", "superscript", "subscript"].index(
|
||||
props["font_script"]
|
||||
)
|
||||
|
||||
if isinstance(props.get("underline"), str):
|
||||
props["underline"] = {
|
||||
"none": 0,
|
||||
"single": 1,
|
||||
"double": 2,
|
||||
"singleAccounting": 33,
|
||||
"doubleAccounting": 34,
|
||||
}[props["underline"]]
|
||||
|
||||
return props
|
||||
|
||||
|
||||
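As a rough sketch (not part of the pandas source above), the mapping table and convert method flatten an openpyxl-style nested style dict into xlsxwriter format properties; the input dict below is invented for illustration:

# Illustrative only -- invented style dict, traced against STYLE_MAPPING above.
example_style = {
    "font": {"bold": True, "color": {"rgb": "FF0000"}},
    "alignment": {"horizontal": "center", "wrap_text": True},
}
# _XlsxStyler.convert(example_style, "0.00") would be expected to yield
# {"num_format": "0.00", "font_color": "FF0000", "bold": True,
#  "align": "center", "text_wrap": True}
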
class XlsxWriter(ExcelWriter):
    engine = "xlsxwriter"
    supported_extensions = (".xlsx",)

    def __init__(
        self,
        path,
        engine=None,
        date_format=None,
        datetime_format=None,
        mode: str = "w",
        storage_options: StorageOptions = None,
        if_sheet_exists: str | None = None,
        engine_kwargs: dict[str, Any] | None = None,
        **kwargs,
    ):
        # Use the xlsxwriter module as the Excel writer.
        from xlsxwriter import Workbook

        engine_kwargs = combine_kwargs(engine_kwargs, kwargs)

        if mode == "a":
            raise ValueError("Append mode is not supported with xlsxwriter!")

        super().__init__(
            path,
            engine=engine,
            date_format=date_format,
            datetime_format=datetime_format,
            mode=mode,
            storage_options=storage_options,
            if_sheet_exists=if_sheet_exists,
            engine_kwargs=engine_kwargs,
        )

        self.book = Workbook(self.handles.handle, **engine_kwargs)

    def save(self):
        """
        Save workbook to disk.
        """
        return self.book.close()

    def write_cells(
        self, cells, sheet_name=None, startrow=0, startcol=0, freeze_panes=None
    ):
        # Write the frame cells using xlsxwriter.
        sheet_name = self._get_sheet_name(sheet_name)

        if sheet_name in self.sheets:
            wks = self.sheets[sheet_name]
        else:
            wks = self.book.add_worksheet(sheet_name)
            self.sheets[sheet_name] = wks

        style_dict = {"null": None}

        if validate_freeze_panes(freeze_panes):
            wks.freeze_panes(*(freeze_panes))

        for cell in cells:
            val, fmt = self._value_with_fmt(cell.val)

            stylekey = json.dumps(cell.style)
            if fmt:
                stylekey += fmt

            if stylekey in style_dict:
                style = style_dict[stylekey]
            else:
                style = self.book.add_format(_XlsxStyler.convert(cell.style, fmt))
                style_dict[stylekey] = style

            if cell.mergestart is not None and cell.mergeend is not None:
                wks.merge_range(
                    startrow + cell.row,
                    startcol + cell.col,
                    startrow + cell.mergestart,
                    startcol + cell.mergeend,
                    val,
                    style,
                )
            else:
                wks.write(startrow + cell.row, startcol + cell.col, val, style)
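In practice this writer is reached through the public pandas API rather than constructed directly. A minimal usage sketch, assuming the xlsxwriter package is installed; the file and sheet names below are arbitrary:

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
with pd.ExcelWriter("report.xlsx", engine="xlsxwriter") as writer:
    # Each block of cells produced by to_excel is routed through
    # XlsxWriter.write_cells above.
    df.to_excel(writer, sheet_name="data", freeze_panes=(1, 0))
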
172
.venv/Lib/site-packages/pandas/io/excel/_xlwt.py
Normal file
@ -0,0 +1,172 @@
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    Any,
)

import pandas._libs.json as json
from pandas._typing import StorageOptions

from pandas.io.excel._base import ExcelWriter
from pandas.io.excel._util import (
    combine_kwargs,
    validate_freeze_panes,
)

if TYPE_CHECKING:
    from xlwt import XFStyle


class XlwtWriter(ExcelWriter):
    engine = "xlwt"
    supported_extensions = (".xls",)

    def __init__(
        self,
        path,
        engine=None,
        date_format=None,
        datetime_format=None,
        encoding=None,
        mode: str = "w",
        storage_options: StorageOptions = None,
        if_sheet_exists: str | None = None,
        engine_kwargs: dict[str, Any] | None = None,
        **kwargs,
    ):
        # Use the xlwt module as the Excel writer.
        import xlwt

        engine_kwargs = combine_kwargs(engine_kwargs, kwargs)

        if mode == "a":
            raise ValueError("Append mode is not supported with xlwt!")

        super().__init__(
            path,
            mode=mode,
            storage_options=storage_options,
            if_sheet_exists=if_sheet_exists,
            engine_kwargs=engine_kwargs,
        )

        if encoding is None:
            encoding = "ascii"
        self.book = xlwt.Workbook(encoding=encoding, **engine_kwargs)
        self.fm_datetime = xlwt.easyxf(num_format_str=self.datetime_format)
        self.fm_date = xlwt.easyxf(num_format_str=self.date_format)

    def save(self):
        """
        Save workbook to disk.
        """
        if self.sheets:
            # fails when the ExcelWriter is just opened and then closed
            self.book.save(self.handles.handle)

    def write_cells(
        self, cells, sheet_name=None, startrow=0, startcol=0, freeze_panes=None
    ):

        sheet_name = self._get_sheet_name(sheet_name)

        if sheet_name in self.sheets:
            wks = self.sheets[sheet_name]
        else:
            wks = self.book.add_sheet(sheet_name)
            self.sheets[sheet_name] = wks

        if validate_freeze_panes(freeze_panes):
            wks.set_panes_frozen(True)
            wks.set_horz_split_pos(freeze_panes[0])
            wks.set_vert_split_pos(freeze_panes[1])

        style_dict: dict[str, XFStyle] = {}

        for cell in cells:
            val, fmt = self._value_with_fmt(cell.val)

            stylekey = json.dumps(cell.style)
            if fmt:
                stylekey += fmt

            if stylekey in style_dict:
                style = style_dict[stylekey]
            else:
                style = self._convert_to_style(cell.style, fmt)
                style_dict[stylekey] = style

            if cell.mergestart is not None and cell.mergeend is not None:
                wks.write_merge(
                    startrow + cell.row,
                    startrow + cell.mergestart,
                    startcol + cell.col,
                    startcol + cell.mergeend,
                    val,
                    style,
                )
            else:
                wks.write(startrow + cell.row, startcol + cell.col, val, style)

    @classmethod
    def _style_to_xlwt(
        cls, item, firstlevel: bool = True, field_sep=",", line_sep=";"
    ) -> str:
        """
        helper which recursively generate an xlwt easy style string
        for example:

            hstyle = {"font": {"bold": True},
                      "border": {"top": "thin",
                                 "right": "thin",
                                 "bottom": "thin",
                                 "left": "thin"},
                      "align": {"horiz": "center"}}

        will be converted to

            font: bold on; \
            border: top thin, right thin, bottom thin, left thin; \
            align: horiz center;
        """
        if hasattr(item, "items"):
            if firstlevel:
                it = [
                    f"{key}: {cls._style_to_xlwt(value, False)}"
                    for key, value in item.items()
                ]
                out = f"{line_sep.join(it)} "
                return out
            else:
                it = [
                    f"{key} {cls._style_to_xlwt(value, False)}"
                    for key, value in item.items()
                ]
                out = f"{field_sep.join(it)} "
                return out
        else:
            item = f"{item}"
            item = item.replace("True", "on")
            item = item.replace("False", "off")
            return item

    @classmethod
    def _convert_to_style(cls, style_dict, num_format_str=None):
        """
        converts a style_dict to an xlwt style object

        Parameters
        ----------
        style_dict : style dictionary to convert
        num_format_str : optional number format string
        """
        import xlwt

        if style_dict:
            xlwt_stylestr = cls._style_to_xlwt(style_dict)
            style = xlwt.easyxf(xlwt_stylestr, field_sep=",", line_sep=";")
        else:
            style = xlwt.XFStyle()
        if num_format_str is not None:
            style.num_format_str = num_format_str

        return style
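For reference, a small sketch (not part of the file above) of how the easy-style string built by _style_to_xlwt could be inspected; the style dict is the one from the method's own docstring, and the private import path is the module shown above:

from pandas.io.excel._xlwt import XlwtWriter

hstyle = {
    "font": {"bold": True},
    "border": {"top": "thin", "right": "thin", "bottom": "thin", "left": "thin"},
    "align": {"horiz": "center"},
}
# Expected output (up to whitespace):
# "font: bold on ;border: top thin,right thin,bottom thin,left thin ;align: horiz center  "
print(XlwtWriter._style_to_xlwt(hstyle))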