# NOTE: The original file began with pastebin export metadata ("Untitled /
# unknown / plain_text / a year ago / 39 kB / 9 / Indexable"), which is not
# Python code; it is preserved here as a comment so the module is importable.
import ssl
# Disable HTTPS certificate verification for the whole process.
# NOTE(review): this weakens TLS for every HTTPS request the program makes —
# presumably a workaround for a corporate proxy or self-signed certificate;
# confirm it is still required.
ssl._create_default_https_context = ssl._create_unverified_context
import pandas as pd
from colorama import init
from datetime import timedelta
import tkinter as tk
from tkinter import scrolledtext, filedialog
from tkinter import ttk
from tkcalendar import DateEntry
import openpyxl  # imported so pandas' .xlsx writer engine is available for to_excel
# Initialize colorama
# NOTE(review): colorama only affects terminal output; the GUI uses tk text
# tags for color — confirm colorama is still needed.
init(autoreset=True)
def load_data(file_path):
    """Read a semicolon-delimited, latin1-encoded CSV export.

    Returns a pandas DataFrame, or None if the file cannot be read
    (the error is printed to stdout rather than raised).
    """
    try:
        return pd.read_csv(file_path, delimiter=';', encoding='latin1')
    except Exception as e:
        print(f"Error loading file: {e}")
        return None
def sum_billed(status_counts, positive=True):
    """Total of billed orders from a status→count mapping.

    positive=True sums the positively billed statuses; positive=False sums
    the negatively billed ones. Missing statuses count as 0.
    """
    if positive:
        keys = ('zbilingowany', 'zbilingowany - POPC')
    else:
        keys = ('zbilingowany negatywnie', 'zbilingowany negatywnie - POPC')
    return sum(status_counts.get(key, 0) for key in keys)
def sum_transferred(status_counts):
    """Total of orders handed over to the call center, across all variants."""
    transferred_keys = (
        'przekazane do call center - POPC',
        'przekazane do call center - MMP',
        'przekazane do call center - OK - POPC',
        'przekazane do call center - OK',
        'przekazane do call center - MMP - POPC',
    )
    return sum(status_counts.get(key, 0) for key in transferred_keys)
def sum_unfinished(status_counts):
    """Total of unfinished applications (plain and POPC variants)."""
    keys = ('nieukończony wniosek', 'nieukończony wniosek - POPC')
    return sum(status_counts.get(key, 0) for key in keys)
def filter_out_tests(data, column):
    """Drop rows whose *column* contains 'Test' (case-insensitive); NaN rows are kept."""
    is_test_row = data[column].str.contains('Test', case=False, na=False)
    return data[~is_test_row]
def print_status_counts(text_area, title, status_counts, sum_positive, sum_negative, sum_transferred, sum_unfinished):
    """Write a status breakdown plus four colored aggregate lines to *text_area*.

    The title is green; each "status: count" line is untagged; the four
    aggregates use yellow / red / cyan / magenta tags; a blank line closes
    the section.
    """
    text_area.insert(tk.END, f"{title}\n", 'green')
    for status, count in status_counts.items():
        text_area.insert(tk.END, f"{status}: {count}\n")
    summary_rows = (
        (f"\nzbilingowany + zbilingowany - POPC: {sum_positive}\n", 'yellow'),
        (f"zbilingowany negatywnie + zbilingowany negatywnie - POPC: {sum_negative}\n", 'red'),
        (f"przekazane do call center - POPC + MMP + OK - POPC + OK + MMP - POPC: {sum_transferred}\n", 'cyan'),
        (f"nieukończony wniosek + nieukończony wniosek - POPC: {sum_unfinished}\n", 'magenta'),
    )
    for text, tag in summary_rows:
        text_area.insert(tk.END, text, tag)
    text_area.insert(tk.END, "\n")
def print_special_status_counts(text_area, title, status_counts):
    """Write *title* (green tag) followed by one "status: count" line per entry."""
    body_lines = [f"{status}: {count}\n" for status, count in status_counts.items()]
    text_area.insert(tk.END, f"{title}\n", 'green')
    for line in body_lines:
        text_area.insert(tk.END, line)
    text_area.insert(tk.END, "\n")
def filter_and_display_data(data, text_areas, start_date, end_date):
    """Populate the 'Podsumowanie', 'Raporty' and 'Instalacje' report tabs.

    Filters *data* to 'www nowe' / 'mmp nowe' orders placed between
    start_date and end_date (inclusive), then writes status breakdowns,
    installation-date listings, next-day-installation statistics and
    per-package counts into the matching text areas.

    Parameters:
        data: pandas.DataFrame from the CSV export, or None (no-op).
            Columns are addressed by position — see the index comments below.
        text_areas: dict of ScrolledText widgets; keys used here are
            'Podsumowanie', 'Raporty' and 'Instalacje'.
        start_date, end_date: inclusive range, anything pd.to_datetime accepts.

    Side effects: adds an 'Order Date' column to *data* and clears every
    text area before writing.
    """
    if data is not None:
        for text_area in text_areas.values():
            text_area.delete('1.0', tk.END)  # Clear previous text
        # Get the data from columns "D", "L", "P" and "K"
        source_column = data.columns[3]  # Assuming "D" is the fourth column (index 3)
        status_column = data.columns[11]  # Assuming "L" is the twelfth column (index 11)
        surname_column = data.columns[15]  # Assuming "P" is the sixteenth column (index 15)
        order_date_column = data.columns[7]  # Assuming "H" is the eighth column (index 7)
        installation_date_column = data.columns[10]  # Assuming "K" is the eleventh column (index 10)
        # Check if "Nazwa paczki" is in column "AC" or "AG"
        if len(data.columns) > 28 and data.columns[28] == 'Nazwa paczki':
            package_column = data.columns[28]  # Assuming "AC" is the twenty-ninth column (index 28)
        elif len(data.columns) > 32 and data.columns[32] == 'Nazwa paczki':
            package_column = data.columns[32]  # Assuming "AG" is the thirty-third column (index 32)
        else:
            text_areas['Podsumowanie'].insert(tk.END, "Nazwa paczki column not found.\n", 'red')
            return
        # Filter out rows where column "P" contains "Test" or "test"
        data = filter_out_tests(data, surname_column)
        # Convert date columns to datetime (the time after the comma is dropped)
        data['Order Date'] = pd.to_datetime(data[order_date_column].str.split(',').str[0], format='%d-%m-%Y')
        # Filter data based on selected date range
        filtered_combined = data[
            (data['Order Date'] >= pd.to_datetime(start_date)) &
            (data['Order Date'] <= pd.to_datetime(end_date)) &
            (data[source_column].isin(['www nowe', 'mmp nowe']))
        ]
        # Count occurrences of each unique value in the status column for the filtered data
        status_counts_combined = filtered_combined[status_column].value_counts()
        # Print the status counts for 'www nowe' and 'mmp nowe' combined
        print_status_counts(
            text_areas['Podsumowanie'],
            "Status counts for 'www nowe' and 'mmp nowe' in column 'Źródło zamówienia':",
            status_counts_combined,
            sum_billed(status_counts_combined),
            sum_billed(status_counts_combined, positive=False),
            sum_transferred(status_counts_combined),
            sum_unfinished(status_counts_combined)
        )
        # Further filter data to include only rows where column "Nazwa paczki" contains "INT"
        filtered_final = filtered_combined[filtered_combined[package_column].str.contains('INT', na=False)]
        # Count occurrences of each unique value in the status column for the filtered data
        status_counts_final = filtered_final[status_column].value_counts()
        # Print the status counts for the final filtered data
        print_status_counts(
            text_areas['Raporty'],
            "Status counts for 'www nowe' and 'mmp nowe' in column 'Źródło zamówienia' with 'INT' in column 'Nazwa paczki':",
            status_counts_final,
            sum_billed(status_counts_final),
            sum_billed(status_counts_final, positive=False),
            sum_transferred(status_counts_final),
            sum_unfinished(status_counts_final)
        )
        # Filter data to include only rows where column "Nazwa paczki" contains "MVNO"
        filtered_mvno = filtered_combined[filtered_combined[package_column].str.contains('MVNO', na=False)]
        # Count occurrences of each unique value in the status column for the filtered data
        status_counts_mvno = filtered_mvno[status_column].value_counts()
        # Print the status counts for the filtered MVNO data
        print_special_status_counts(
            text_areas['Raporty'],
            "Status counts for 'MVNO' in column 'Nazwa paczki':",
            status_counts_mvno
        )
        # Filter data to include only rows where column "Nazwa paczki" contains "K Vectra TV Smart"
        filtered_vtv = filtered_combined[filtered_combined[package_column].str.contains('K Vectra TV Smart', na=False)]
        # Count occurrences of each unique value in the status column for the filtered data
        status_counts_vtv = filtered_vtv[status_column].value_counts()
        # Print the status counts for the filtered VTV data
        print_special_status_counts(
            text_areas['Raporty'],
            "Status counts for 'K Vectra TV Smart' in column 'Nazwa paczki' for 'www nowe' and 'mmp nowe':",
            status_counts_vtv
        )
        # Keep only rows whose status is 'zbilingowany' or 'zbilingowany - POPC'
        # and whose 'Nazwa paczki' contains 'INT' (.copy() so we can add columns)
        filtered_billed_int = filtered_combined[
            ((filtered_combined[status_column] == 'zbilingowany') | (filtered_combined[status_column] == 'zbilingowany - POPC')) &
            (filtered_combined[package_column].str.contains('INT', na=False))
        ].copy()
        # Extract and count unique dates in the installation-date column
        # (format "dd.mm.yyyy ..." — everything after the first space is dropped)
        filtered_billed_int['Installation Date'] = pd.to_datetime(filtered_billed_int[installation_date_column].str.split(' ').str[0], format='%d.%m.%Y')
        filtered_billed_int['Date Only'] = filtered_billed_int['Installation Date']
        filtered_billed_int['Month'] = filtered_billed_int['Date Only'].dt.strftime('%m')
        filtered_billed_int['Day'] = filtered_billed_int['Date Only'].dt.strftime('%d.%m')
        date_counts = filtered_billed_int.groupby('Month')['Day'].value_counts().sort_index(level=[0, 1])
        # Print the date counts, grouped and sorted by month
        text_areas['Instalacje'].insert(tk.END, "Data instalacji:\n", 'green')
        current_month = None
        for (month, day), count in date_counts.items():
            if month != current_month:
                # New month: print a month header before its days
                current_month = month
                text_areas['Instalacje'].insert(tk.END, f"\n{month}.\n")
            text_areas['Instalacje'].insert(tk.END, f"{day}: {count}\n")
        # Filter for 'Next day installations': 'INT' in package column,
        # 'www nowe'/'mmp nowe' source, billed status
        next_day_filtered = filtered_combined[
            (filtered_combined[package_column].str.contains('INT', na=False)) &
            (filtered_combined[source_column].isin(['www nowe', 'mmp nowe'])) &
            ((filtered_combined[status_column] == 'zbilingowany') | (filtered_combined[status_column] == 'zbilingowany - POPC'))
        ].copy()
        # Convert date columns to datetime (order date keeps the time component here)
        next_day_filtered['Order Date'] = pd.to_datetime(next_day_filtered[order_date_column], format='%d-%m-%Y, %H:%M:%S')
        next_day_filtered['Installation Date'] = pd.to_datetime(next_day_filtered[installation_date_column].str.split(' ').str[0], format='%d.%m.%Y')
        # An order is a "next day installation" when it is installed exactly
        # one calendar day after it was placed
        next_day_filtered['Next Day'] = next_day_filtered['Order Date'] + timedelta(days=1)
        next_day_installations = next_day_filtered[next_day_filtered['Next Day'].dt.date == next_day_filtered['Installation Date'].dt.date]
        # Print 'Next day installations by week' (ISO week of the installation date)
        text_areas['Instalacje'].insert(tk.END, "Next day installations by week:\n", 'green')
        current_week = None
        week_start = None
        week_count = 0
        for index, row in next_day_installations.iterrows():
            week_of_year = row['Installation Date'].isocalendar()[1]
            if week_of_year != current_week:
                # Week changed: flush the previous week's total, start a new header
                if current_week is not None:
                    text_areas['Instalacje'].insert(tk.END, f"SUMA: {week_count}\n", 'blue')
                current_week = week_of_year
                week_start = row['Installation Date'].strftime('%d-%m-%Y')
                week_count = 0
                text_areas['Instalacje'].insert(tk.END, f"\nWeek starting on {week_start}:\n")
            text_areas['Instalacje'].insert(tk.END, f"Order Date: {row['Order Date'].strftime('%d-%m-%Y, %H:%M:%S')} -> Installation Date: {row['Installation Date'].strftime('%d-%m-%Y')}\n")
            week_count += 1
        # Flush the final week's total
        if current_week is not None:
            text_areas['Instalacje'].insert(tk.END, f"SUMA: {week_count}\n", 'blue')
        # Same report for lead times of 2–5 days
        day_diffs = [2, 3, 4, 5]
        for day_diff in day_diffs:
            text_areas['Instalacje'].insert(tk.END, f"Next day installations by days ({day_diff} dni):\n", 'green')
            next_day_filtered[f'Next Day {day_diff}'] = next_day_filtered['Order Date'] + timedelta(days=day_diff)
            next_day_installations_day = next_day_filtered[next_day_filtered[f'Next Day {day_diff}'].dt.date == next_day_filtered['Installation Date'].dt.date]
            for index, row in next_day_installations_day.iterrows():
                text_areas['Instalacje'].insert(tk.END, f"Order Date: {row['Order Date'].strftime('%d-%m-%Y, %H:%M:%S')} -> Installation Date: {row['Installation Date'].strftime('%d-%m-%Y')}\n")
            text_areas['Instalacje'].insert(tk.END, f"SUMA: {len(next_day_installations_day)}\n", 'blue')
        # 'TOP' packages: billed 'zbilingowany' INT orders from web/mobile
        top_filtered = filtered_final[
            (filtered_final[source_column].isin(['www nowe', 'mmp nowe'])) &
            (filtered_final[status_column] == 'zbilingowany')
        ]
        # Get the most frequent package names
        # NOTE(review): the label says "TOP 3" but head(20) lists up to 20 —
        # presumably widened later without renaming; confirm which is intended.
        top_packages = top_filtered[package_column].value_counts().head(20)
        text_areas['Podsumowanie'].insert(tk.END, "TOP 3 'INT' packages:\n", 'green')
        for package, count in top_packages.items():
            text_areas['Podsumowanie'].insert(tk.END, f"{package}: {count}\n")
        # Filter data for 'INT per paczka' (per-package counts further below)
        int_per_paczka_filtered = filtered_final[
            filtered_final[package_column].str.contains('INT', na=False)
        ]
        # Base filter shared by the 1P/2P counts below
        filtered_data_combined_status = filtered_final[
            (filtered_final[source_column].isin(['www nowe', 'mmp nowe'])) &
            ((filtered_final[status_column] == 'zbilingowany') | (filtered_final[status_column] == 'zbilingowany - POPC'))
        ]
        # 1P (single play): names containing 'INT' and either no 'DTV' or also 'Stalowy'
        int_stalowy_filtered_data = filtered_data_combined_status[
            filtered_data_combined_status[package_column].str.contains('INT', na=False) &
            (
                (~filtered_data_combined_status[package_column].str.contains('DTV', na=False)) |
                (filtered_data_combined_status[package_column].str.contains('Stalowy', na=False))
            )
        ]
        # Number of records matching the 1P conditions
        count_1p = int_stalowy_filtered_data.shape[0]
        text_areas['Podsumowanie'].insert(tk.END, f"1P = {count_1p}\n", 'blue')
        # 2P (double play): names matching 'DTV...INT' but without 'Stalowy'
        dtv_int_filtered_data = filtered_data_combined_status[
            filtered_data_combined_status[package_column].str.contains('DTV.*INT', na=False) &
            ~filtered_data_combined_status[package_column].str.contains('Stalowy', na=False)
        ]
        # Record count for 2P
        count_2p = len(dtv_int_filtered_data)
        text_areas['Podsumowanie'].insert(tk.END, f"2P = {count_2p}\n", 'blue')
        # Hours report: web/mobile source, billed statuses, 'INT' packages.
        # NOTE(review): this uses the full *data*, not the date-filtered
        # subset — confirm the hourly report is meant to ignore the range.
        filtered_data_godziny = data[
            (data[source_column].isin(['www nowe', 'mmp nowe'])) &
            (data[status_column].isin(['zbilingowany', 'zbilingowany - POPC'])) &
            (data[package_column].str.contains('INT', na=False))
        ]
        # Extract the hour from the order-date column (column H, after the comma)
        # NOTE(review): assigning to a filtered slice may raise
        # SettingWithCopyWarning; a .copy() above would silence it.
        filtered_data_godziny['Godzina'] = pd.to_datetime(filtered_data_godziny[order_date_column].str.split(',').str[1].str.strip(), format='%H:%M:%S').dt.hour
        # Count the number of records in each hour (0 to 23)
        hourly_counts = filtered_data_godziny['Godzina'].value_counts().sort_index()
        # Reindex so every hour 0-23 appears, missing hours shown as 0
        hourly_counts = hourly_counts.reindex(range(24), fill_value=0)
        # Display the per-hour results
        text_areas['Podsumowanie'].insert(tk.END, "Wyniki dla godzin:\n", 'green')
        for hour, count in hourly_counts.items():
            text_areas['Podsumowanie'].insert(tk.END, f"Godzina {hour}: {count}\n")
        # Count occurrences of specific package tiers
        # ('Z³oty' covers a latin1/cp1250 mojibake spelling of 'Złoty')
        int_zloty_count = int_per_paczka_filtered[
            int_per_paczka_filtered[package_column].str.contains('Złoty|Zloty|Z³oty', na=False)
        ].shape[0]
        int_srebrny_count = int_per_paczka_filtered[
            int_per_paczka_filtered[package_column].str.contains('Srebrny', na=False)
        ].shape[0]
        int_platynowy_count = int_per_paczka_filtered[
            int_per_paczka_filtered[package_column].str.contains('Platynowy', na=False)
        ].shape[0]
        # Print the counts for 'INT per paczka'
        text_areas['Podsumowanie'].insert(tk.END, "INT per paczka:\n", 'green')
        text_areas['Podsumowanie'].insert(tk.END, f"Złoty: {int_zloty_count}\n")
        text_areas['Podsumowanie'].insert(tk.END, f"Srebrny: {int_srebrny_count}\n")
        text_areas['Podsumowanie'].insert(tk.END, f"Platynowy: {int_platynowy_count}\n")
def filter_and_display_installation_hours(data, text_area, start_date, end_date):
    """Show counts of installation time slots ("HH:MM - HH:MM") in *text_area*.

    Considers only billed ('zbilingowany' / 'zbilingowany - POPC') 'INT'
    orders from 'www nowe'/'mmp nowe' placed within [start_date, end_date].

    Parameters:
        data: pandas.DataFrame from the CSV export, or None (no-op).
        text_area: ScrolledText widget to write into (cleared first).
        start_date, end_date: inclusive range, anything pd.to_datetime accepts.
    """
    if data is None:
        return
    text_area.delete('1.0', tk.END)  # Clear previous text
    # Columns are addressed positionally, matching the rest of the file.
    source_column = data.columns[3]  # "D": order source
    status_column = data.columns[11]  # "L": order status
    order_date_column = data.columns[7]  # "H": "dd-mm-yyyy, HH:MM:SS"
    installation_date_column = data.columns[10]  # "K": date + time slot
    package_column = data.columns[28] if len(data.columns) > 28 and data.columns[28] == 'Nazwa paczki' else data.columns[32]
    # Billed 'INT' orders from the web/mobile channels.
    # .copy() so the column assignments below don't hit a view of *data*
    # (avoids SettingWithCopyWarning / lost writes under copy-on-write).
    filtered_data = data[
        (data[source_column].isin(['www nowe', 'mmp nowe'])) &
        (data[status_column].isin(['zbilingowany', 'zbilingowany - POPC'])) &
        (data[package_column].str.contains('INT', na=False))
    ].copy()
    # Parse the order date (time part after the comma is dropped).
    filtered_data['Order Date'] = pd.to_datetime(filtered_data[order_date_column].str.split(',').str[0], format='%d-%m-%Y')
    filtered_data = filtered_data[
        (filtered_data['Order Date'] >= pd.to_datetime(start_date)) &
        (filtered_data['Order Date'] <= pd.to_datetime(end_date))
    ].copy()
    # Pull the "HH:MM - HH:MM" slot out of the installation column and count slots.
    filtered_data['Installation Hour'] = filtered_data[installation_date_column].str.extract(r'(\d{2}:\d{2} - \d{2}:\d{2})')
    installation_hours_counts = filtered_data['Installation Hour'].value_counts().sort_index()
    text_area.insert(tk.END, "Godzina instalacji:\n", 'green')
    for hour, count in installation_hours_counts.items():
        text_area.insert(tk.END, f"{hour}: {count}\n")
def filter_and_display_arpu(data, text_area, start_date, end_date):
    """Show the average price (ARPU) per ISO week in *text_area*.

    Considers only billed 'INT' orders from 'www nowe'/'mmp nowe' placed
    within [start_date, end_date]. Requires a 'Cena' column at index 33
    (prices use a decimal comma, e.g. "49,99").

    Parameters:
        data: pandas.DataFrame from the CSV export, or None (no-op).
        text_area: ScrolledText widget to write into (cleared first).
        start_date, end_date: inclusive range, anything pd.to_datetime accepts.
    """
    if data is None:
        return
    text_area.delete('1.0', tk.END)  # Clear previous text
    # Columns are addressed positionally, matching the rest of the file.
    source_column = data.columns[3]  # "D": order source
    status_column = data.columns[11]  # "L": order status
    order_date_column = data.columns[7]  # "H": "dd-mm-yyyy, HH:MM:SS"
    package_column = data.columns[28] if len(data.columns) > 28 and data.columns[28] == 'Nazwa paczki' else data.columns[32]
    price_column = data.columns[33] if len(data.columns) > 33 and data.columns[33] == 'Cena' else None
    if price_column is None:
        text_area.insert(tk.END, "Cena column not found.\n", 'red')
        return
    # Billed 'INT' orders from the web/mobile channels.
    # .copy() so the column assignments below don't hit a view of *data*
    # (avoids SettingWithCopyWarning / lost writes under copy-on-write).
    filtered_data = data[
        (data[source_column].isin(['www nowe', 'mmp nowe'])) &
        (data[status_column].isin(['zbilingowany', 'zbilingowany - POPC'])) &
        (data[package_column].str.contains('INT', na=False))
    ].copy()
    # Parse the order date (time part after the comma is dropped).
    filtered_data['Order Date'] = pd.to_datetime(filtered_data[order_date_column].str.split(',').str[0], format='%d-%m-%Y')
    filtered_data = filtered_data[
        (filtered_data['Order Date'] >= pd.to_datetime(start_date)) &
        (filtered_data['Order Date'] <= pd.to_datetime(end_date))
    ].copy()
    # Decimal-comma prices → float.
    filtered_data[price_column] = filtered_data[price_column].str.replace(',', '.').astype(float)
    # Average ARPU per ISO week number.
    filtered_data['Week'] = filtered_data['Order Date'].dt.isocalendar().week
    weekly_arpu = filtered_data.groupby('Week')[price_column].mean()
    text_area.insert(tk.END, "Średnie ARPU w tygodniu:\n", 'green')
    for week, arpu in weekly_arpu.items():
        text_area.insert(tk.END, f"Tydzień {week}: {arpu:.2f}\n")
def filter_and_display_commitment_period(data, text_area, start_date, end_date):
    """Show commitment-period ('12'/'24'/'brak') counts per day and per week.

    Considers only billed orders from 'www nowe'/'mmp nowe' within
    [start_date, end_date]. Requires a commitment column at index 34
    (accepts three known spellings, including a mojibake variant).

    Parameters:
        data: pandas.DataFrame from the CSV export, or None (no-op).
        text_area: ScrolledText widget to write into (cleared first).
        start_date, end_date: inclusive range, anything pd.to_datetime accepts.
    """
    if data is None:
        return
    text_area.delete('1.0', tk.END)  # Clear previous text
    # Columns are addressed positionally, matching the rest of the file.
    source_column = data.columns[3]  # "D": order source
    status_column = data.columns[11]  # "L": order status
    order_date_column = data.columns[7]  # "H": "dd-mm-yyyy, HH:MM:SS"
    # 'Okres obowi¹zywania' is a latin1/cp1250 mojibake spelling seen in exports.
    commitment_column = data.columns[34] if len(data.columns) > 34 and data.columns[34] in ['Okres obowiązywania', 'Okres zobowiązania', 'Okres obowi¹zywania'] else None
    if commitment_column is None:
        text_area.insert(tk.END, "Okres obowiązywania column not found.\n", 'red')
        return
    # Billed web/mobile orders with a recognized commitment value.
    # .copy() so the column assignments below don't hit a view of *data*
    # (avoids SettingWithCopyWarning / lost writes under copy-on-write).
    filtered_data = data[
        (data[source_column].isin(['www nowe', 'mmp nowe'])) &
        (data[status_column].isin(['zbilingowany', 'zbilingowany - POPC'])) &
        (data[commitment_column].isin(['12', '24', 'brak']))
    ].copy()
    # Parse the order date (time part after the comma is dropped).
    filtered_data['Order Date'] = pd.to_datetime(filtered_data[order_date_column].str.split(',').str[0], format='%d-%m-%Y')
    filtered_data = filtered_data[
        (filtered_data['Order Date'] >= pd.to_datetime(start_date)) &
        (filtered_data['Order Date'] <= pd.to_datetime(end_date))
    ].copy()
    # Day x commitment-value count table.
    filtered_data['Day'] = filtered_data['Order Date'].dt.strftime('%d-%m-%Y')
    daily_commitment_counts = filtered_data.groupby(['Day', commitment_column]).size().unstack(fill_value=0)
    text_area.insert(tk.END, "Okres zobowiazania w dniu:\n", 'green')
    for day, counts in daily_commitment_counts.iterrows():
        text_area.insert(tk.END, f"{day}:\n")
        for period, count in counts.items():
            text_area.insert(tk.END, f"{period}: {count}\n")
    # ISO-week x commitment-value count table.
    filtered_data['Week'] = filtered_data['Order Date'].dt.isocalendar().week
    weekly_commitment_counts = filtered_data.groupby(['Week', commitment_column]).size().unstack(fill_value=0)
    text_area.insert(tk.END, "\nOkres zobowiazania w tygodniu:\n", 'green')
    for week, counts in weekly_commitment_counts.iterrows():
        text_area.insert(tk.END, f"Tydzień {week}:\n")
        for period, count in counts.items():
            text_area.insert(tk.END, f"{period}: {count}\n")
def filter_and_display_daily_data(data, text_area, start_date, end_date):
    """Per-day status breakdowns for INT / MVNO / 'K Vectra TV Smart' packages.

    Writes one section per calendar day in [start_date, end_date] for
    'www nowe'/'mmp nowe' orders. Days with no matching rows print only
    the day header.

    Parameters:
        data: pandas.DataFrame from the CSV export, or None (no-op).
        text_area: ScrolledText widget to write into (cleared first).
        start_date, end_date: inclusive range, anything pd.to_datetime accepts.

    Side effect: adds an 'Order Date' datetime column to *data*.
    """
    if data is None:
        return
    text_area.delete('1.0', tk.END)  # Clear previous text
    # Columns are addressed positionally, matching the rest of the file.
    source_column = data.columns[3]  # "D": order source
    status_column = data.columns[11]  # "L": order status
    order_date_column = data.columns[7]  # "H": "dd-mm-yyyy, HH:MM:SS"
    # Locate 'Nazwa paczki' in column AC (28) or AG (32) — the same fallback
    # used by filter_and_display_data, so both export layouts are supported
    # (previously only index 32 was accepted here).
    if len(data.columns) > 28 and data.columns[28] == 'Nazwa paczki':
        package_column = data.columns[28]
    elif len(data.columns) > 32 and data.columns[32] == 'Nazwa paczki':
        package_column = data.columns[32]
    else:
        text_area.insert(tk.END, "Nazwa paczki column not found.\n", 'red')
        return
    # Parse the order date (time part after the comma is dropped).
    data['Order Date'] = pd.to_datetime(data[order_date_column].str.split(',').str[0], format='%d-%m-%Y')
    # Restrict to the selected range and the web/mobile channels.
    filtered_data = data[
        (data['Order Date'] >= pd.to_datetime(start_date)) &
        (data['Order Date'] <= pd.to_datetime(end_date)) &
        (data[source_column].isin(['www nowe', 'mmp nowe']))
    ]
    # Split by package family.
    filtered_int = filtered_data[filtered_data[package_column].str.contains('INT', na=False)]
    filtered_mvno = filtered_data[filtered_data[package_column].str.contains('MVNO', na=False)]
    filtered_vtv = filtered_data[filtered_data[package_column].str.contains('K Vectra TV Smart', na=False)]
    # Day (dd-mm-yyyy) x status count tables.
    daily_int = filtered_int.groupby([filtered_int['Order Date'].dt.strftime('%d-%m-%Y'), status_column]).size().unstack(fill_value=0)
    daily_mvno = filtered_mvno.groupby([filtered_mvno['Order Date'].dt.strftime('%d-%m-%Y'), status_column]).size().unstack(fill_value=0)
    daily_vtv = filtered_vtv.groupby([filtered_vtv['Order Date'].dt.strftime('%d-%m-%Y'), status_column]).size().unstack(fill_value=0)
    text_area.insert(tk.END, "Dane dzienne:\n", 'green')
    for day in pd.date_range(start=start_date, end=end_date):
        day_str = day.strftime('%d-%m-%Y')
        text_area.insert(tk.END, f"\n{day_str}\n", 'blue')
        if day_str in daily_int.index:
            text_area.insert(tk.END, "Status counts for 'www nowe' and 'mmp nowe' in column 'Źródło zamówienia' with 'INT' in column 'Nazwa paczki':\n", 'green')
            for status, count in daily_int.loc[day_str].items():
                text_area.insert(tk.END, f"{status}: {count}\n")
            text_area.insert(tk.END, f"\nzbilingowany + zbilingowany - POPC: {sum_billed(daily_int.loc[day_str])}\n", 'yellow')
        if day_str in daily_mvno.index:
            text_area.insert(tk.END, "Status counts for 'MVNO' in column 'Nazwa paczki':\n", 'green')
            for status, count in daily_mvno.loc[day_str].items():
                text_area.insert(tk.END, f"{status}: {count}\n")
        if day_str in daily_vtv.index:
            text_area.insert(tk.END, "Status counts for 'K Vectra TV Smart' in column 'Nazwa paczki' for 'www nowe' and 'mmp nowe':\n", 'green')
            for status, count in daily_vtv.loc[day_str].items():
                text_area.insert(tk.END, f"{status}: {count}\n")
def export_to_excel(text_area):
    """Export the contents of *text_area* to an .xlsx file chosen by the user.

    Lines starting with a tab are parsed as "product: value" rows under the
    most recent non-indented line, which is treated as the current date.
    Does nothing if the save dialog is cancelled.
    """
    file_path = filedialog.asksaveasfilename(defaultextension=".xlsx", filetypes=[("Excel files", "*.xlsx")])
    if file_path:
        raw_text = text_area.get("1.0", tk.END)
        rows = []
        current_date = None
        for raw_line in raw_text.split("\n"):
            if raw_line.startswith("\n") or raw_line == "":
                continue
            if raw_line.startswith("\t"):
                pieces = raw_line.strip().split(":")
                if len(pieces) == 2:
                    product, value = pieces
                    rows.append([current_date, product, value.strip()])
            else:
                current_date = raw_line.strip()
        frame = pd.DataFrame(rows, columns=["Data", "Produkt", "Wartość"])
        frame.to_excel(file_path, index=False)
        print(f"Data exported to {file_path}")
def on_check_button_click(text_areas, start_date, end_date):
    """Refresh every report tab for both years.

    2024 uses the picked range; 2023 uses the same range shifted back one year.
    """
    prev_start = start_date.replace(year=start_date.year - 1)
    prev_end = end_date.replace(year=end_date.year - 1)
    for year_key, year_data, first_day, last_day in (
        ('2024', global_data_2024, start_date, end_date),
        ('2023', global_data_2023, prev_start, prev_end),
    ):
        areas = text_areas[year_key]
        filter_and_display_data(year_data, areas, first_day, last_day)
        filter_and_display_installation_hours(year_data, areas["Godzina instalacji"], first_day, last_day)
        filter_and_display_arpu(year_data, areas["ARPu"], first_day, last_day)
        filter_and_display_commitment_period(year_data, areas["Okres zobowiazania"], first_day, last_day)
        filter_and_display_daily_data(year_data, areas["Daty"], first_day, last_day)
def on_load_file_2024(root, text_areas, start_date, end_date):
    """Prompt for the 2024 CSV, load it, refresh all 2024 tabs and the title."""
    global global_data_2024
    file_path = filedialog.askopenfilename()
    if not file_path:
        return
    global_data_2024 = load_data(file_path)
    areas = text_areas['2024']
    filter_and_display_data(global_data_2024, areas, start_date, end_date)
    filter_and_display_installation_hours(global_data_2024, areas["Godzina instalacji"], start_date, end_date)
    filter_and_display_arpu(global_data_2024, areas["ARPu"], start_date, end_date)
    filter_and_display_commitment_period(global_data_2024, areas["Okres zobowiazania"], start_date, end_date)
    filter_and_display_daily_data(global_data_2024, areas["Daty"], start_date, end_date)
    update_title_with_date_range(root, global_data_2024, 2024)
def on_load_file_2023(root, text_areas, start_date, end_date):
    """Prompt for the 2023 CSV, load it, refresh all 2023 tabs and the title.

    The picked range is shifted back one year before filtering.
    """
    global global_data_2023
    file_path = filedialog.askopenfilename()
    if not file_path:
        return
    global_data_2023 = load_data(file_path)
    prev_start = start_date.replace(year=start_date.year - 1)
    prev_end = end_date.replace(year=end_date.year - 1)
    areas = text_areas['2023']
    filter_and_display_data(global_data_2023, areas, prev_start, prev_end)
    filter_and_display_installation_hours(global_data_2023, areas["Godzina instalacji"], prev_start, prev_end)
    filter_and_display_arpu(global_data_2023, areas["ARPu"], prev_start, prev_end)
    filter_and_display_commitment_period(global_data_2023, areas["Okres zobowiazania"], prev_start, prev_end)
    filter_and_display_daily_data(global_data_2023, areas["Daty"], prev_start, prev_end)
    update_title_with_date_range(root, global_data_2023, 2023)
def update_title_with_date_range(root, data, year):
    """Append (or refresh) "Od: <min> do <max> - <year>" in the window title.

    Fixes the previous behavior, which truncated the title at the first
    " - " when refreshing — discarding the OTHER year's suffix as well.
    Now only this year's existing range segment is removed before appending.

    Parameters:
        root: the Tk root window (only .title() is used).
        data: pandas.DataFrame, or None (no-op). Mutated: an 'Order Date'
            datetime column is added, parsed from column index 7
            ("dd-mm-yyyy, HH:MM:SS").
        year: the report year the suffix is labelled with.
    """
    import re  # local import keeps this fix self-contained
    if data is not None:
        order_date_column = data.columns[7]  # "H": order submission timestamp
        data['Order Date'] = pd.to_datetime(data[order_date_column].str.split(',').str[0], format='%d-%m-%Y')
        min_date = data['Order Date'].min().strftime('%d/%m/%Y')
        max_date = data['Order Date'].max().strftime('%d/%m/%Y')
        # Remove only this year's previous range segment, if present.
        base_title = re.sub(rf" - Od: \S+ do \S+ - {year}", "", root.title())
        root.title(f"{base_title} - Od: {min_date} do {max_date} - {year}")
def main():
    """Build the Tk GUI and start the event loop.

    Layout: a top-level notebook with one tab per year (2024 / 2023); each
    year holds a sub-notebook with seven report tabs, each backed by a
    ScrolledText widget. Data files are opened with Ctrl+O (2024) and
    Ctrl+P (2023); 'Sprawdź' recomputes both years for the picked range.
    """
    global global_data_2024
    global global_data_2023
    global_data_2024 = None  # Global variable to hold the loaded data for 2024
    global_data_2023 = None  # Global variable to hold the loaded data for 2023
    root = tk.Tk()
    root.title("Raport Testowy")
    notebook = ttk.Notebook(root)
    # Create main tabs (one per year), each hosting its own sub-notebook
    tab_2024 = ttk.Frame(notebook)
    notebook.add(tab_2024, text="2024")
    notebook_2024 = ttk.Notebook(tab_2024)
    tab_2023 = ttk.Frame(notebook)
    notebook.add(tab_2023, text="2023")
    notebook_2023 = ttk.Notebook(tab_2023)
    # Create frames for each sub-tab in 2024
    summary_frame_2024 = ttk.Frame(notebook_2024)
    installations_frame_2024 = ttk.Frame(notebook_2024)
    reports_frame_2024 = ttk.Frame(notebook_2024)
    installation_hours_frame_2024 = ttk.Frame(notebook_2024)
    arpu_frame_2024 = ttk.Frame(notebook_2024)
    commitment_period_frame_2024 = ttk.Frame(notebook_2024)
    daily_frame_2024 = ttk.Frame(notebook_2024)
    notebook_2024.add(summary_frame_2024, text="Podsumowanie")
    notebook_2024.add(installations_frame_2024, text="Instalacje")
    notebook_2024.add(reports_frame_2024, text="Raporty")
    notebook_2024.add(installation_hours_frame_2024, text="Godzina instalacji")
    notebook_2024.add(arpu_frame_2024, text="ARPu")
    notebook_2024.add(commitment_period_frame_2024, text="Okres zobowiazania")
    notebook_2024.add(daily_frame_2024, text="Daty")
    notebook_2024.pack(expand=1, fill='both')
    # Create frames for each sub-tab in 2023 (mirrors the 2024 layout)
    summary_frame_2023 = ttk.Frame(notebook_2023)
    installations_frame_2023 = ttk.Frame(notebook_2023)
    reports_frame_2023 = ttk.Frame(notebook_2023)
    installation_hours_frame_2023 = ttk.Frame(notebook_2023)
    arpu_frame_2023 = ttk.Frame(notebook_2023)
    commitment_period_frame_2023 = ttk.Frame(notebook_2023)
    daily_frame_2023 = ttk.Frame(notebook_2023)
    notebook_2023.add(summary_frame_2023, text="Podsumowanie")
    notebook_2023.add(installations_frame_2023, text="Instalacje")
    notebook_2023.add(reports_frame_2023, text="Raporty")
    notebook_2023.add(installation_hours_frame_2023, text="Godzina instalacji")
    notebook_2023.add(arpu_frame_2023, text="ARPu")
    notebook_2023.add(commitment_period_frame_2023, text="Okres zobowiazania")
    notebook_2023.add(daily_frame_2023, text="Daty")
    notebook_2023.pack(expand=1, fill='both')
    notebook.pack(expand=1, fill='both')
    # One ScrolledText per report tab, keyed by year then tab name —
    # the same keys the filter_and_display_* functions use.
    text_areas = {
        '2024': {
            "Podsumowanie": scrolledtext.ScrolledText(summary_frame_2024, wrap=tk.WORD, width=100, height=40),
            "Instalacje": scrolledtext.ScrolledText(installations_frame_2024, wrap=tk.WORD, width=100, height=40),
            "Raporty": scrolledtext.ScrolledText(reports_frame_2024, wrap=tk.WORD, width=100, height=40),
            "Godzina instalacji": scrolledtext.ScrolledText(installation_hours_frame_2024, wrap=tk.WORD, width=100, height=40),
            "ARPu": scrolledtext.ScrolledText(arpu_frame_2024, wrap=tk.WORD, width=100, height=40),
            "Okres zobowiazania": scrolledtext.ScrolledText(commitment_period_frame_2024, wrap=tk.WORD, width=100, height=40),
            "Daty": scrolledtext.ScrolledText(daily_frame_2024, wrap=tk.WORD, width=100, height=40)
        },
        '2023': {
            "Podsumowanie": scrolledtext.ScrolledText(summary_frame_2023, wrap=tk.WORD, width=100, height=40),
            "Instalacje": scrolledtext.ScrolledText(installations_frame_2023, wrap=tk.WORD, width=100, height=40),
            "Raporty": scrolledtext.ScrolledText(reports_frame_2023, wrap=tk.WORD, width=100, height=40),
            "Godzina instalacji": scrolledtext.ScrolledText(installation_hours_frame_2023, wrap=tk.WORD, width=100, height=40),
            "ARPu": scrolledtext.ScrolledText(arpu_frame_2023, wrap=tk.WORD, width=100, height=40),
            "Okres zobowiazania": scrolledtext.ScrolledText(commitment_period_frame_2023, wrap=tk.WORD, width=100, height=40),
            "Daty": scrolledtext.ScrolledText(daily_frame_2023, wrap=tk.WORD, width=100, height=40)
        }
    }
    for year_text_areas in text_areas.values():
        for text_area in year_text_areas.values():
            text_area.pack(padx=10, pady=10)
            # Configure text tags for color (matches the tags used by the
            # filter_and_display_* and print_* functions)
            text_area.tag_config('green', foreground='green')
            text_area.tag_config('yellow', foreground='yellow')
            text_area.tag_config('red', foreground='red')
            text_area.tag_config('cyan', foreground='cyan')
            text_area.tag_config('magenta', foreground='magenta')
            text_area.tag_config('blue', foreground='blue')
    # Date picker row: start/end DateEntry widgets plus action buttons
    date_frame = ttk.Frame(root)
    date_frame.pack(pady=10)
    start_label = ttk.Label(date_frame, text="Start Date:")
    start_label.pack(side=tk.LEFT, padx=(0, 5))
    start_date = DateEntry(date_frame, width=12, background='darkblue', foreground='white', borderwidth=2, date_pattern='d/m/y')
    start_date.pack(side=tk.LEFT, padx=(0, 15))
    end_label = ttk.Label(date_frame, text="End Date:")
    end_label.pack(side=tk.LEFT, padx=(0, 5))
    end_date = DateEntry(date_frame, width=12, background='darkblue', foreground='white', borderwidth=2, date_pattern='d/m/y')
    end_date.pack(side=tk.LEFT, padx=(0, 15))
    check_button = ttk.Button(date_frame, text="Sprawdź", command=lambda: on_check_button_click(text_areas, start_date.get_date(), end_date.get_date()))
    check_button.pack(side=tk.LEFT, padx=(0, 15))
    # Exports only the 2024 "Daty" tab
    export_button = ttk.Button(date_frame, text="Export", command=lambda: export_to_excel(text_areas['2024']["Daty"]))
    export_button.pack(side=tk.LEFT, padx=(0, 15))
    # NOTE(review): 'Odśwież' refreshes only the 2024 summary report, not the
    # other 2024 tabs nor 2023 — confirm this is intentional.
    refresh_button = ttk.Button(date_frame, text="Odśwież", command=lambda: filter_and_display_data(global_data_2024, text_areas['2024'], start_date.get_date(), end_date.get_date()))
    refresh_button.pack(side=tk.LEFT, padx=(0, 15))
    root.bind("<Control-o>", lambda event: on_load_file_2024(root, text_areas, start_date.get_date(), end_date.get_date()))
    root.bind("<Control-p>", lambda event: on_load_file_2023(root, text_areas, start_date.get_date(), end_date.get_date()))
    # NOTE(review): the messages mention drag-and-drop, but no drop handler is
    # bound here — only the Ctrl+O / Ctrl+P shortcuts work; confirm.
    text_areas['2024']['Podsumowanie'].insert(tk.END, "Drag and drop a CSV file into this window or press Ctrl+O to open a file for 2024.\n")
    text_areas['2023']['Podsumowanie'].insert(tk.END, "Drag and drop a CSV file into this window or press Ctrl+P to open a file for 2023.\n")
    root.mainloop()
# Start the GUI only when run directly (not on import).
if __name__ == "__main__":
    main()
# NOTE: The original file ended with pastebin footer text ("Editor is
# loading... / Leave a Comment"); it is preserved as a comment so the module
# remains valid Python.