Bootstrap

python批量查询数据库_Python + MySQL 批量查询百度收录

做SEO的同学,经常会遇到几百或几千个站点,然后对于收录情况去做分析的情况

那么对于常用的一些工具,在面对几千个站点需要做收录分析的时候,就显得不是很合适了。

在此特意分享给大家一个批量查询百度收录状况的代码

使用 Python + MySQL(MariaDB) 配合使用

import pymysql

from urllib import request

import re

import time

import os,sys

# 数据操作类

class DataExec:
    """Thin wrapper around a pymysql connection for the Baidu-index table.

    Connection settings and table names are class attributes; edit them to
    match the local database before running the script.
    """

    # Database and table name
    db = "domain"
    dt = "bdshoulu"
    # Login credentials
    hostName = "localhost"
    userName = "root"
    password = "pwd"

    def __init__(self):
        # NOTE: this deliberately rebinds the *method* name `conn` to the
        # live connection object on the instance (original behavior, kept
        # so code reading `self.conn` as the connection still works).
        self.conn = self.conn()

    def __del__(self):
        # Guard: __init__ may have failed before a connection was made,
        # in which case `self.conn` is still the bound method.
        conn = getattr(self, "conn", None)
        if conn is not None and not callable(conn):
            conn.close()

    def conn(self):
        """Create and return a new pymysql connection (utf8mb4)."""
        return pymysql.connect(host=self.hostName,
                               user=self.userName,
                               password=self.password,
                               db=self.db,
                               charset='utf8mb4')

    def selectwebsite(self):
        """Return all (id, website) rows ordered by id, or () on failure.

        Returning an empty tuple (instead of the original implicit None)
        keeps callers' `for row in results:` loops safe.
        """
        cursor = self.conn.cursor()
        # The table name cannot be a bound parameter; it comes from a
        # trusted class attribute, not from user input.
        sql = 'select id,website from %s order by id' % self.dt
        try:
            cursor.execute(sql)
            return cursor.fetchall()
        except pymysql.Error as exc:
            print("query failed: %s (%s)" % (sql, exc))
            return ()

    def update_shoulu(self, id, shoulu):
        """Store the indexed-page count `shoulu` for row `id` and commit.

        Values are bound as query parameters (no string interpolation) to
        avoid SQL injection; only the trusted table name is formatted in.
        """
        sql = 'update %s set shoulu = %%s where id = %%s' % self.dt
        try:
            cursor = self.conn.cursor()
            cursor.execute(sql, (shoulu, id))
            self.conn.commit()
        except pymysql.Error:
            # Roll back the failed statement so the connection stays usable.
            self.conn.rollback()

    def commit(self):
        """Explicitly commit any pending transaction."""
        self.conn.commit()

db = DataExec()
results = db.selectwebsite()

# Compile the result-count patterns once, outside the loop.
RE_COUNT = re.compile(r'找到相关结果数约(\d+?)个')
RE_SITE = re.compile(r'该网站共有\s*?(.+?)\s*?个网页被百度收录')
UA = 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.94 Safari/537.36'

for row in results:
    # `row_id` avoids shadowing the builtin `id`.
    row_id = row[0]
    website = row[1]
    url = "https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&rsv_idx=1&tn=baidu&wd=site:" + website
    try:
        req = request.Request(url)
        req.add_header('User-Agent', UA)
        response = request.urlopen(req, timeout=15)
        # Status 200 means the results page was fetched successfully.
        if response.status == 200:
            html = response.read().decode('utf-8', 'ignore')
            m = RE_COUNT.search(html)
            if m:
                slnum = m.group(1)
                print(row_id, website, '已收录 ', slnum)
                db.update_shoulu(row_id, slnum)
            else:
                # Fallback layout: "this site has N pages indexed by Baidu",
                # where N may contain thousands separators.
                m = RE_SITE.search(html)
                if m:
                    slnum = int(m.group(1).replace(',', ''))
                    print(row_id, website, '已收录 ', slnum)
                    db.update_shoulu(row_id, slnum)
                else:
                    # Not indexed (or Baidu changed the page layout).
                    print(row_id, website)
    except Exception as exc:
        # The original bare `except: continue` skipped the sleep below,
        # hammering Baidu without delay after every failure, and swallowed
        # KeyboardInterrupt. Log and fall through to the throttle instead.
        print(row_id, website, 'error:', exc)
    finally:
        # Throttle requests regardless of success or failure.
        time.sleep(1)

sys.exit()

上面代码的思路就是从数据库中取出各个域名(website),然后使用 Python 抓取百度的收录查询页面(把域名填入查询参数),再对返回的结果用正则匹配出对应的收录数量并写回数据库。整体思路比较简单,不熟悉的同学可以跟着代码走一遍流程即可,需要的同学拿走

;