A colleague mentioned that train tickets have been hard to buy lately, so I helped him set up a monitoring script.

High-speed rail tickets are genuinely hard to get right now, even with the waitlist ("候补") feature. You have to keep refreshing to occasionally snag a returned ticket, and sometimes a waitlisted order simply fails shortly before departure with a "no tickets left" notice.

Forewarned is forearmed: I decided to write a script that starts monitoring two weeks in advance, which should noticeably improve the odds of getting a ticket.

My first thought was to call the 12306 API directly, but that turned out to be a dead end: the page opens fine by hand, yet the bare API returns no data. So I took a detour: open the page with selenium and extract its content. I had planned to parse it with BeautifulSoup, but it turned out that once the page is rendered, pandas can clean the data directly. The code looks roughly like this:

import time
from selenium import webdriver
from selenium.common.exceptions import TimeoutException  # raised on page-load timeout
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.wait import WebDriverWait
# BeautifulSoup turned out to be unnecessary: pandas.read_html parses the tables directly
import pandas
import arrow


Max_Time_Out = 30   # page-load timeout (seconds)
Time_Out = 10       # element-wait timeout (seconds)
sh_to_sz = "18002400"   # evening window, Shanghai -> Suzhou
sz_to_sh = "06001200"   # morning window, Suzhou -> Shanghai
sh_to_sz_train = "G7026|G7260|G7062|G7226|G7028"   # regex of watched train numbers
sh_station = 'cc_from_station_上海_check'            # checkbox id for Shanghai station
sz_to_sh_train = "D3125|G7001|G7037|G7039"
sz_station = 'cc_from_station_苏州_check'            # checkbox id for Suzhou station


time_id = "cc_start_time"
now = arrow.now().format('YYYY-MM-DD HH:mm:ss')  # mm/ss: arrow uses MM for month, SS for sub-second


option = webdriver.ChromeOptions()


option.add_argument('--disable-infobars')
# option.binary_location = "/Applications/AppleTools /Google Chrome.app/Contents/MacOS/Google Chrome"
option.add_argument('--headless')   # run Chrome without a visible window


sh_to_sz_url = "https://kyfw.12306.cn/otn/leftTicket/init?linktypeid=dc&fs=%E4%B8%8A%E6%B5%B7,SHH&ts=%E8%8B%8F%E5%B7%9E,SZH&date={}&flag=N,N,Y"
sz_to_sh_url = "https://kyfw.12306.cn/otn/leftTicket/init?linktypeid=dc&fs=%E8%8B%8F%E5%B7%9E,SZH&ts=%E4%B8%8A%E6%B5%B7,SHH&date={}&flag=N,N,Y"


driver = webdriver.Chrome(options=option)


driver.set_page_load_timeout(Max_Time_Out)




def open_page(url):
    try:
        driver.get(url)
    except TimeoutException:  # selenium raises TimeoutException, not the built-in TimeoutError
        print("cannot open the page for {} seconds".format(Max_Time_Out))
        driver.execute_script('window.stop()')




def find_element(obj):
    # wait until the element with the given id is visible, then return it
    WebDriverWait(driver, Time_Out).until(EC.visibility_of_element_located((By.ID, obj)))
    element = WebDriverWait(driver, Time_Out).until(lambda x: driver.find_element(By.ID, obj))
    return element




def type(obj, value):
    # note: shadows the built-in type(); unused below, kept as a generic helper
    find_element(obj).clear()
    find_element(obj).send_keys(value)




def clickat(obj):
    WebDriverWait(driver, Time_Out).until(EC.element_to_be_clickable((By.ID, obj)))
    find_element(obj).click()




def toggle_checkbox(station_id):
    # tick the G (high-speed) and D (bullet) train filters plus the departure-station box
    inputs = driver.find_elements(By.TAG_NAME, "input")
    for box in inputs:
        if box.get_attribute("value") in ('G', 'D'):
            box.click()
        if box.get_attribute("id") == station_id:
            box.click()




def get_today():
    # today's date, or None on weekends (no commute to monitor)
    today = arrow.now()
    if today.weekday() not in [5, 6]:
        return today.format('YYYY-MM-DD')




def get_next_day():
    # tomorrow's date, or None if tomorrow is a weekend
    next_day = arrow.now().shift(days=+1)
    if next_day.weekday() not in [5, 6]:
        return next_day.format('YYYY-MM-DD')




def get_next_two_monday():
    # the two Mondays within the coming fortnight
    next_two_monday = [arrow.now().shift(days=x).format('YYYY-MM-DD') for x in range(1, 15) if
                       arrow.now().shift(days=x).weekday() == 0]
    return next_two_monday




def get_next_two_friday():
    # the two Fridays within the coming fortnight
    next_two_friday = [arrow.now().shift(days=x).format('YYYY-MM-DD') for x in range(1, 15) if
                       arrow.now().shift(days=x).weekday() == 4]
    return next_two_friday




def select_time(obj, time_zone, station_id):
    '''Select a departure time window, then apply the train-type and station filters.

    The target element on the 12306 page looks like:
    <select class="select-small" id="cc_start_time">
        <option value="00002400">00:00--24:00</option>
        <option value="00000600">00:00--06:00</option>
        <option value="06001200">06:00--12:00</option>
        <option value="12001800">12:00--18:00</option>
        <option value="18002400">18:00--24:00</option>
    </select>
    '''
    start_time = find_element(obj)
    get_start = Select(start_time)
    get_start.select_by_value(time_zone)
    toggle_checkbox(station_id)
    time.sleep(3)  # let the results table refresh after changing filters




def get_trains(url, city_time, station_id, trains):
    open_page(url)
    select_time(time_id, city_time, station_id)
    ht = driver.page_source
    tables = pandas.read_html(ht)   # parse every <table> on the page
    df = tables[1]                  # the second table is the ticket grid
    # `trains` is a regex like "G7026|G7260": keep only the watched train numbers
    watched = df[df['车次'].str.contains(trains, na=False)]
    column_list = ["车次", "二等座", "无座"]  # train no., second-class seats, standing tickets
    return watched[column_list]
    
if __name__ == "__main__":
    today = get_today()
    next_day = get_next_day()
    mondays = get_next_two_monday()
    fridays = get_next_two_friday()


    df_list = []

    # today / next_day are None on weekends, so guard those queries
    if next_day:
        next_suzhou_to_shanghai = get_trains(sz_to_sh_url.format(next_day), sz_to_sh, sz_station, sz_to_sh_train)
        df_list.append(next_suzhou_to_shanghai)
    if today:
        today_shanghai_to_suzhou = get_trains(sh_to_sz_url.format(today), sh_to_sz, sh_station, sh_to_sz_train)
        df_list.append(today_shanghai_to_suzhou)


    suzhou_to_shanghai_next_monday = get_trains(sz_to_sh_url.format(mondays[0]), sz_to_sh, sz_station, sz_to_sh_train)
    df_list.append(suzhou_to_shanghai_next_monday)


    suzhou_to_shanghai_next_next_monday = get_trains(sz_to_sh_url.format(mondays[1]), sz_to_sh, sz_station,
                                                     sz_to_sh_train)
    df_list.append(suzhou_to_shanghai_next_next_monday)


    shanghai_to_suzhou_next_friday = get_trains(sh_to_sz_url.format(fridays[0]), sh_to_sz, sh_station, sh_to_sz_train)
    df_list.append(shanghai_to_suzhou_next_friday)


    shanghai_to_suzhou_next_next_friday = get_trains(sh_to_sz_url.format(fridays[1]), sh_to_sz, sh_station,
                                                     sh_to_sz_train)
    df_list.append(shanghai_to_suzhou_next_next_friday)


    driver.quit()

Set the trains and dates you need. In my case the commute days are Fridays and Mondays, so the script watches those two days across the next two weeks. Hook it up to a scheduler and have the results emailed to you.

The notification email looks roughly like this:

[Screenshot: the notification email produced by the script]

That way the email arrives while I'm at work, and I can pick the right moment to buy.
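The post doesn't show the mailing step, so here is a minimal sketch of it, assuming a generic SMTP account; smtp.example.com, me@example.com, SMTP_USER and SMTP_PASS are placeholders, not part of the original script. A cron job (or any scheduler) can then run the whole script periodically:

import smtplib
from email.mime.text import MIMEText

def mail_report(df_list, to_addr):
    # join every result DataFrame built in __main__ into one plain-text body
    body = "\n\n".join(df.to_string() for df in df_list)
    msg = MIMEText(body, 'plain', 'utf-8')
    msg['Subject'] = 'Train ticket availability'
    msg['From'] = 'me@example.com'      # placeholder sender address
    msg['To'] = to_addr
    # placeholder SMTP server and credentials
    with smtplib.SMTP_SSL('smtp.example.com', 465) as server:
        server.login('SMTP_USER', 'SMTP_PASS')
        server.send_message(msg)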

Now let's talk about which libraries you might use when working with web pages.

Python web work commonly draws on four kinds of libraries: request libraries, parsing libraries, storage libraries, and utility libraries.

1. Request libraries

urllib and re ship with Python by default.

Python's built-in HTTP library is urllib:
urllib.request — the request module; simulates a browser
urllib.error — the exception-handling module
urllib.parse — the URL-parsing module; utilities such as splitting and joining URLs
urllib.robotparser — the robots.txt parsing module

Examples:

Timeout test:
  import urllib.request
  response = urllib.request.urlopen('http://httpbin.org/get', timeout=1)
  print(response.read())
  # ----- completes normally; now force a timeout:
  import socket
  import urllib.request
  import urllib.error
  try:
      response = urllib.request.urlopen('http://httpbin.org/get', timeout=0.1)
  except urllib.error.URLError as e:
      if isinstance(e.reason, socket.timeout):
          print('TIME OUT')
  # this prints TIME OUT

Request objects let you add headers:

  import urllib.request
  request = urllib.request.Request('https://python.org')
  response = urllib.request.urlopen(request)
  print(response.read().decode('utf-8'))



Another example, a POST with headers and form data:

  from urllib import request, parse
  url = 'http://httpbin.org/post'
  headers = {
      # header names and values must be quoted strings
      'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.75 Safari/537.36',
      'Host': 'httpbin.org'
  }
  form = {   # renamed from `dict`, which shadows the built-in
      'name': 'Germey'
  }
  data = bytes(parse.urlencode(form), encoding='utf8')
  req = request.Request(url=url, data=data, headers=headers, method='POST')
  response = request.urlopen(req)
  print(response.read().decode('utf-8'))



Proxies

  import urllib.request
  proxy_handler = urllib.request.ProxyHandler({
      'http': 'http://127.0.0.1:9743',
      'https': 'http://127.0.0.1:9743',
  })
  opener = urllib.request.build_opener(proxy_handler)
  response = opener.open('http://httpbin.org/get')
  print(response.read())


Cookie

  import http.cookiejar, urllib.request
  cookie = http.cookiejar.CookieJar()   # CookieJar, with a capital J
  handler = urllib.request.HTTPCookieProcessor(cookie)
  opener = urllib.request.build_opener(handler)
  response = opener.open('http://www.baidu.com')
  for item in cookie:
      print(item.name + "=" + item.value)

One way to save cookies to a file (Mozilla format):

  import http.cookiejar, urllib.request
  filename = 'cookie.txt'
  cookie = http.cookiejar.MozillaCookieJar(filename)
  handler = urllib.request.HTTPCookieProcessor(cookie)
  opener = urllib.request.build_opener(handler)
  response = opener.open('http://www.baidu.com')
  cookie.save(ignore_discard=True, ignore_expires=True)

(2) requests

pip3 install requests
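The post gives no requests example, so here is a minimal sketch of the equivalent GET and POST calls, reusing httpbin.org from the urllib examples above:

import requests

# GET with query parameters and a timeout
r = requests.get('http://httpbin.org/get', params={'name': 'Germey'}, timeout=1)
print(r.status_code, r.json())

# POST with form data and custom headers
r = requests.post('http://httpbin.org/post',
                  data={'name': 'Germey'},
                  headers={'User-Agent': 'Mozilla/5.0'})
print(r.text)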

(3) selenium

pip3 install selenium

2. Parsing libraries

2.1 lxml (XPath)

pip3 install lxml

In the snippets below, `test` is a sample HTML string and test.html a sample HTML file.

Parse directly from a string:

from lxml import etree
html = etree.HTML(test)

Parse directly from a file:

from lxml import etree
html = etree.parse('test.html')
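The heading mentions XPath, but none of the snippets above actually query anything, so here is a small sketch; the sample HTML string is made up:

from lxml import etree

test = '<ul><li class="item">first</li><li class="item">second</li></ul>'  # made-up sample
html = etree.HTML(test)
print(html.xpath('//li[@class="item"]/text()'))  # ['first', 'second']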

2.2 beautifulsoup

pip3 install beautifulsoup4

Verify the installation:

from bs4 import BeautifulSoup
soup = BeautifulSoup('<html></html>','lxml')
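Beyond the install check, a minimal usage sketch; the HTML string is a made-up sample:

from bs4 import BeautifulSoup

soup = BeautifulSoup('<div><p class="msg">hello</p><p>world</p></div>', 'lxml')
print(soup.find('p', class_='msg').text)  # hello
print(soup.select('div p')[1].text)       # world, via a CSS selector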

2.3 pyquery (jQuery-like syntax)

pip3 install pyquery

Note: pyquery depends on lxml, so install lxml first or the installation will fail. Verify the installation:

from pyquery import PyQuery as pq
doc = pq('<html>hi</html>')
result = doc('html').text()

There are four ways to initialize a PyQuery object: you can pass in a string, an lxml element, a file, or a URL.

from pyquery import PyQuery as pq
from lxml import etree


d = pq("<html></html>")                    # from a string
d = pq(etree.fromstring("<html></html>"))  # from an lxml element
d = pq(url='http://google.com/')           # from a URL
d = pq(filename=path_to_html_file)         # from a file (path_to_html_file is a path string)

1. .html() and .text(): get the matching HTML block or its text content.

p = pq("<head><title>Hello World!</title></head>")


print(p('head').html())   # the matching HTML block
print(p('head').text())   # the matching text content

Output:

<title>Hello World!</title>
Hello World!

2. ('selector'): fetch the target content via a CSS selector.

d = pq("<div><p id='item-0'>test 1</p><p class='item-1'>test 2</p></div>")


print(d('div').html())      # the HTML block inside the <div>
print(d('#item-0').text())  # text of the element with id item-0
print(d('.item-1').text())  # text of the element with class item-1

Output:

<p id="item-0">test 1</p><p class="item-1">test 2</p>
test 1
test 2

3. .eq(index): get the element at the given index (index starts from 0).

d = pq("<div><p id='item-0'>test 1</p><p class='item-1'>test 2</p></div>")


print(d('p').eq(1).text())  # text of the second <p> element

Output: test 2

4. .find(): search for nested elements.

d = pq("<div><p id='item-0'>test 1</p><p class='item-1'>test 2</p></div>")


print(d('div').find('p'))        # all <p> elements inside the <div>
print(d('div').find('p').eq(0))  # only the first of them

Output:

<p id="item-0">test 1</p><p class="item-1">test 2</p>
<p id="item-0">test 1</p>

5. .filter(): narrow a selection by class or id.

d = pq("<div><p id='item-0'>test 1</p><p class='item-1'>test 2</p></div>")


print(d('p').filter('.item-1'))  # the <p> with class item-1
print(d('p').filter('#item-0'))  # the <p> with id item-0

Output:

<p class="item-1">test 2</p>
<p id="item-0">test 1</p>

6. .attr(): read or modify attribute values.

d = pq("<div><p id='item-0'>test 1</p><a class='item-1'>test 2</a></div>")


print(d('p').attr('id'))            # read the id attribute of <p>
print(d('a').attr('class', 'new'))  # set the class attribute of <a> to new

Output:

item-0
<a class="new">test 2</a>

7. Other operations:

.addClass(value): add a class;
.hasClass(name): check whether the element has the given class, returns True or False;
.children(): get child elements;
.parents(): get ancestor elements;
.next(): get the next sibling;
.nextAll(): get all following siblings;
.not_('selector'): get all elements that do not match the selector;
for i in d.items('li'): print(i.text()): iterate over the li elements in d;
3. Storage libraries

3.1 pymysql (MySQL, a relational database). Install:

pip3 install pymysql
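A minimal usage sketch, assuming a local MySQL server; the credentials, the spider database and the tickets table are placeholders:

import pymysql

conn = pymysql.connect(host='localhost', user='root', password='secret', database='spider')
try:
    with conn.cursor() as cursor:
        # assumes a tickets(train, seats) table already exists
        cursor.execute("INSERT INTO tickets (train, seats) VALUES (%s, %s)", ('G7026', 12))
    conn.commit()
finally:
    conn.close()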

3.2 pymongo (MongoDB, a key-value/document store). Install:

pip3 install pymongo
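A minimal usage sketch against a local MongoDB; the database and collection names are made up:

import pymongo

client = pymongo.MongoClient('mongodb://localhost:27017/')
db = client['spider']
db['tickets'].insert_one({'train': 'G7026', 'second_class': 12})
print(db['tickets'].find_one({'train': 'G7026'}))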

3.3 redis (for distributed crawlers, maintaining the crawl queue). Install:

pip3 install redis
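A minimal sketch of Redis as a crawl queue, assuming a local server on the default port; the queue name is made up:

import redis

r = redis.StrictRedis(host='localhost', port=6379, db=0)
r.lpush('crawl_queue', 'https://example.com/page1')  # enqueue a URL
url = r.rpop('crawl_queue')                          # dequeue from the other end (FIFO)
print(url)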

With a little technology to help with life's small chores, everyday life gets that much better!