
Building a Web Scraper with Python - 1

indeed.py


import requests
from bs4 import BeautifulSoup

LIMIT = 50

URL = f"https://kr.indeed.com/%EC%B7%A8%EC%97%85?q=python&limit={LIMIT}&radius=25&start=0&vjk=0699e6dbb5a0ec38"

def get_last_page():
  # Request the search results page and parse the HTML.
  result = requests.get(URL)
  soup = BeautifulSoup(result.text, "html.parser")
  # Find the pagination div in the HTML.
  pagination = soup.find("div", {"class": "pagination"})
  links = pagination.find_all('a')
  # Empty list to collect the page numbers found by the for loop.
  pages = []
  # The last link is the "Next" button, so skip it.
  for link in links[:-1]:
    pages.append(int(link.string))
  max_page = pages[-1]
  return max_page

def extract_job(html):
  # Pull the title, company, location, and job id out of one job card.
  title = html.find("span", title=True).text
  company = html.find("span", {"class": "companyName"}).string
  location = html.find("div", {"class": "companyLocation"}).string
  job_id = html["data-jk"]
  return {
    'title': title,
    'company': company,
    'location': location,
    'link': f"https://kr.indeed.com/%EC%B7%A8%EC%97%85?q=python&radius=25&start=50&vjk={job_id}"
  }

def extract_jobs(last_page):
  jobs = []
  for page in range(last_page):
    print(f"Scraping page {page}")
    # Each page advances the start parameter by LIMIT results.
    result = requests.get(f"{URL}&start={page*LIMIT}")
    soup = BeautifulSoup(result.text, "html.parser")
    results = soup.find_all('a', {"class": "fs-unmask"})
    for result in results:
      job = extract_job(result)
      jobs.append(job)
  return jobs


def get_jobs():
  last_page = get_last_page()
  jobs = extract_jobs(last_page)
  return jobs
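
To sanity-check indeed.py on its own, it can be run from a small test script. This is only a quick sketch (the check_indeed.py file name is mine, and it assumes Indeed's markup still matches the selectors above):

from indeed import get_jobs

jobs = get_jobs()
print(f"Got {len(jobs)} jobs")
if jobs:
  print(jobs[0])  # {'title': ..., 'company': ..., 'location': ..., 'link': ...}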


main.py


from indeed import get_jobs as get_indeed_jobs
from so import get_jobs as get_so_jobs


#indeed_jobs = get_indeed_jobs()
so_jobs = get_so_jobs()
#print(indeed_jobs)
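
Both scrapers are meant to return lists of dicts with the same keys, so once so.py is finished main.py can merge the two lists and write everything to a CSV file. A minimal sketch using only the standard-library csv module (the save_to_file name is my own, not one of the course files):

import csv

def save_to_file(jobs):
  # One header row, then one row per scraped job.
  with open("jobs.csv", mode="w", newline="", encoding="utf-8") as file:
    writer = csv.writer(file)
    writer.writerow(["title", "company", "location", "link"])
    for job in jobs:
      writer.writerow([job.get("title"), job.get("company"), job.get("location"), job.get("link")])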

so.py


import requests
from bs4 import BeautifulSoup
# Pagination on this page uses the s-pagination class.
URL = "https://stackoverflow.com/jobs/companies?q=python"


def get_last_page():
  result = requests.get(URL)
  soup = BeautifulSoup(result.text, "html.parser")
  pages = soup.find("div", {"class": "s-pagination"}).find_all("a")
  # The last link is "next", so the second-to-last link holds the last page number.
  last_page = pages[-2].get_text(strip=True)
  return int(last_page)


def extract_jobs(last_page):
  jobs = []
  for page in range(last_page):
    # The companies list paginates with a pg query parameter starting at 1.
    result = requests.get(f"{URL}&pg={page+1}")
    soup = BeautifulSoup(result.text, "html.parser")
    results = soup.find_all("div", {"class": "d-flex"})
    for result in results:
      # Still inspecting the markup; print each card for now.
      print(result)
  return jobs

def get_jobs():
  last_page = get_last_page()
  jobs = extract_jobs(last_page)
  return jobs
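
The per-company extraction above is still unfinished: the loop only prints each result card so the markup can be inspected. Once the right selectors are confirmed, the inner loop would hand each card to a helper roughly like this (a sketch only; the h2-anchor structure is an assumption about the companies page, not something verified against the live markup):

def extract_company(html):
  # Pull the company name and detail link out of one result card.
  anchor = html.find("h2").find("a")
  name = anchor.get_text(strip=True)
  link = f"https://stackoverflow.com{anchor['href']}"
  return {"name": name, "link": link}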