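# Download Afarak's adjusted historical prices from Nasdaq OMX Nordic with
# Selenium, append them to the accumulated AFAGR.csv history file, and delete
# the temporary export from the Downloads folder afterwards.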
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import csv
import time
import glob
import os, fnmatch
import pandas as pd
import re, os.path
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.select import Select
from selenium.webdriver.firefox.options import Options as FirefoxOptions
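# Note: csv, glob, fnmatch, re, Keys, Select and FirefoxOptions are imported but
# not used in the active code below.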


# Launch Chrome with the local chromedriver and open the historical prices page
# (Selenium 3-style constructor; Selenium 4 passes the driver path via a Service object).
driver = webdriver.Chrome("C:/Users/apskaita3/Desktop/chromedriver.exe")
driver.maximize_window()
driver.get("http://www.nasdaqomxnordic.com/shares/historicalprices")


# Set the start of the date range, then search for the instrument by name.
fromDate = driver.find_element(By.ID, "FromDate")
fromDate.click()
fromDate.clear()
fromDate.send_keys("2006-01-01")
time.sleep(4)

inputElement = driver.find_element(By.ID, "instSearchHistorical")
inputElement.send_keys("Afarak")

# Wait for the autocomplete suggestions to render, then click the matching entry.
time.sleep(4)
driver.find_element(By.XPATH, "/html/body/ul[2]/li[2]/a/div[1]").click()
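# The fixed sleeps above could be replaced with explicit waits via the imported
# WebDriverWait/EC helpers, e.g. (sketch only, not wired into the flow below):
# WebDriverWait(driver, 15).until(EC.element_to_be_clickable((By.ID, "showAdjusted")))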

time.sleep(2)

# Switch to adjusted prices and export the table; the CSV is saved to Downloads.
driver.find_element(By.ID, "showAdjusted").click()

time.sleep(4)
driver.find_element(By.ID, "exportExcel").click()

time.sleep(5)
driver.close()


# Locate the freshly downloaded CSV: its file name contains the chosen start date.
accepted_extensions = ["csv"]
filenames = [fn for fn in os.listdir("C:/Users/apskaita3/Downloads/") if fn.split(".")[-1] in accepted_extensions]
filenames = [s for s in filenames if "-2006-01-01" in s]
print(filenames)
a1 = filenames[0]
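# If several exports match, the newest one could be chosen by modification time
# instead of taking the first hit, e.g. (sketch):
# a1 = max(filenames, key=lambda fn: os.path.getmtime("C:/Users/apskaita3/Downloads/" + fn))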

# Read the download; an empty file falls back to an empty DataFrame with the
# expected columns so the concat below still works.
try:
    old_csv1 = pd.read_csv(f'C:/Users/apskaita3/Downloads/{a1}', sep=';', skiprows=1)
except pd.errors.EmptyDataError:
    old_csv1 = pd.DataFrame(columns=['Date', 'Bid', 'Ask', 'Opening price', 'High price', 'Low price',
                                     'Closing price', 'Average price', 'Total volume', 'Turnover', 'Trades'])

# Read the accumulated history. Probe with ';' first: if the header still contains
# the Excel "sep=" hint, re-read as a semicolon-separated export with the hint row
# skipped; otherwise the file was written by this script and is comma-separated.
master_path = 'C:/Users/apskaita3/Finansų analizės ir valdymo sprendimai, UAB/Rokas Toomsalu - Power BI analitika/Integracijos/1_Public comapnies analytics/2. Nasdaq Helsinki/Nasdaq_historical_prices/AFAGR.csv'
new_csv1 = pd.read_csv(master_path, sep=';')
if 'sep=' in new_csv1.columns.tolist():
    new_csv1 = pd.read_csv(master_path, sep=';', skiprows=1)
else:
    new_csv1 = pd.read_csv(master_path, sep=',')

# Combine the new export with the existing history, normalise the decimal
# separator in 'Total volume', drop duplicate rows and dates, and save back.
new_df1 = pd.concat([old_csv1, new_csv1]).reset_index(drop=True)
new_df1.columns = old_csv1.columns
new_df1['Total volume'] = new_df1['Total volume'].astype(str)
new_df1['Total volume'] = [x.replace(',', '.') for x in new_df1['Total volume']]
new_df1['Total volume'] = new_df1['Total volume'].astype(float)
new_df1 = new_df1.sort_values('Date', ascending=False).drop_duplicates()
if 'Unnamed: 11' in new_df1.columns.tolist():
    del new_df1['Unnamed: 11']
new_df1['Date'] = new_df1['Date'].drop_duplicates()
new_df1 = new_df1[new_df1['Date'].notnull()]
new_df1.to_csv(master_path, index=False)
print(new_df1.duplicated())




# Clean up: delete this run's exports from the Downloads folder.
os.chdir("C:/Users/apskaita3/Downloads")
for e in os.listdir():
    if "-2006-01-01" in e:
        os.remove(e)


filenames.clear()




