
Naver Finance

A simple scraper for the Naver Finance market index page: it downloads the page once, then lets the user pick an exchange rate or commodity from a menu and prints its name, current value, and unit.

import urllib.request as request
from bs4 import BeautifulSoup as bs

url = "https://finance.naver.com/marketindex/"

# Fetch the page once up front; every menu choice below is answered
# from this single parsed snapshot.
data = request.urlopen(url).read()

soup = bs(data, "html.parser")

while True:
    print("""
    1. US Dollar (USD)
    2. Japanese Yen (JPY, per 100)
    3. Euro (EUR)
    4. Chinese Yuan (CNY)
    5. Japanese Yen / US Dollar
    6. US Dollar / Euro
    7. US Dollar / British Pound
    8. Dollar Index
    9. WTI
    10. Gasoline
    11. Gold (international)
    12. Gold (domestic)

    0. Quit
    """)
    cn = None
    txt = None

    # int() raises ValueError on non-numeric input, so guard it
    # instead of letting the program crash.
    try:
        choice = int(input())
    except ValueError:
        print("Please enter a valid number.")
        continue

    # Map the choice to the CSS class of the item (cn) and, where the
    # page shows one, the class of its unit label (txt).

    if choice == 1:
        cn = 'usd'
        txt = 'txt_krw'
        
    elif choice == 2: 
        cn = 'jpy'
        txt = 'txt_krw'
        
    elif choice == 3:
        cn = 'eur'
        txt = 'txt_krw'
        
    elif choice == 4: 
        cn = 'cny'
        txt = 'txt_krw'
        
    elif choice == 5: 
        cn = 'jpy_usd'
        txt = 'txt_jpy'
        
    elif choice == 6: 
        cn = 'usd_eur'
        txt = 'txt_usd'
        
    elif choice == 7: 
        cn = 'usd_gbp'
        txt = 'txt_usd'
        
    elif choice == 8: 
        cn = 'usd_idx'
        
    elif choice == 9: 
        cn = 'wti'
        txt = 'txt_usd'
        
    elif choice == 10: 
        cn = 'gasoline'
        txt = 'txt_krw'
        
    elif choice == 11: 
        cn = 'gold_inter'
        txt = 'txt_usd'
        
    elif choice == 12: 
        cn = 'gold_domestic'
        txt = 'txt_krw'
        
    elif choice == 0:
        print("종료합니다.")
        break

    if cn is not None:
        # Item name (e.g. "미국 USD"), then the current value.
        print(soup.select_one(f"a.{cn} > h3.h_lst").string, end=" : ")
        print(soup.select_one(f"a.{cn} > div.head_info > span.value").string, end="")

        if txt is not None:
            # Unit label such as 원 or 달러; this also ends the line.
            print(soup.select_one(f"span.{txt}").string)
        else:
            print()  # no unit span (the dollar index): still end the line

    else:
        print("Please enter a valid number.")


Naver News IT/Science Headlines

Fetch the IT/Science section page of Naver News and print the headline of every clustered article. A browser-like User-Agent header is sent so the request looks like an ordinary browser visit.

import requests
from bs4 import BeautifulSoup

url = 'https://news.naver.com/main/main.nhn?mode=LSD&mid=shm&sid1=105'
print("url = ", url)
headers = {'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'}

html = requests.get(url, headers = headers)

soup = BeautifulSoup(html.text, 'html.parser')

# Avoid shadowing the built-in str; each match is an <a> tag.
for headline in soup.select("a.cluster_text_headline"):
    print(headline.string)
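Each match is an anchor tag, so the article URL is available next to the headline text. Here is a small self-contained sketch of that extension, assuming the page still serves the a.cluster_text_headline markup shown above:

# Sketch: print each headline together with its article URL.
import requests
from bs4 import BeautifulSoup

url = 'https://news.naver.com/main/main.nhn?mode=LSD&mid=shm&sid1=105'
headers = {'User-Agent': 'Mozilla/5.0'}  # shortened UA; the full string above also works

soup = BeautifulSoup(requests.get(url, headers=headers).text, 'html.parser')

for a in soup.select("a.cluster_text_headline"):
    # get_text(strip=True) also handles tags with nested children,
    # where .string would return None.
    print(a.get_text(strip=True), "-", a.get("href"))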