Friday, June 8, 2018

Python - Start scraping - 7. extract data from a DBF file without downloading it

..........

# body holds the zip archive bytes fetched earlier (download step elided above)
zf = ZipFile(io.BytesIO(body))

# pick out the shapefile components; sorted so they unpack as dbf, prj, shp, shx
filenames = [y for y in sorted(zf.namelist()) for ending in ['dbf', 'prj', 'shp', 'shx'] if y.endswith(ending)]
dbf, prj, shp, shx = [io.BytesIO(zf.read(filename)) for filename in filenames]

# read the shapefile entirely in memory (pyshp)
rshp = shapefile.Reader(shp=shp, shx=shx, dbf=dbf)
fields = rshp.fields[1:]
field_names = [field[0] for field in fields]

data_1 = []
data_2 = []
for r in rshp.shapeRecords():
    r = dict(zip(field_names, r.record))  # map field names to this record's values
............
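
Putting the pieces together, a minimal end-to-end sketch of the same idea might look like the block below: it reads a zipped shapefile straight out of memory with pyshp and collects the attribute records into a list of dicts. The body bytes and the names parts and records are assumptions standing in for the elided parts above.

# minimal sketch (assumes pyshp is installed and `body` holds the zip bytes)
import io
from zipfile import ZipFile
import shapefile  # pyshp

zf = ZipFile(io.BytesIO(body))
parts = {name.split('.')[-1].lower(): io.BytesIO(zf.read(name))
         for name in zf.namelist()
         if name.lower().endswith(('.dbf', '.shp', '.shx'))}

rshp = shapefile.Reader(shp=parts['shp'], shx=parts['shx'], dbf=parts['dbf'])
field_names = [f[0] for f in rshp.fields[1:]]

# one dict per shape record, keyed by the DBF field names
records = [dict(zip(field_names, sr.record)) for sr in rshp.shapeRecords()]
print(records[:2])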

Python - Start scraping - 6. unpivot data with pandas

from ftplib import FTP
import zipfile
import csv
import pandas as pd

filename = 'XXXX.zip'
ftp = FTP('ftp.XXXX.com')
ftp.login()
ftp.cwd("/XXXX")
gFile = open(filename, "wb")
ftp.retrbinary('RETR XXXX.zip', gFile.write)
gFile.close()
ftp.quit()

# Unzip file
with zipfile.ZipFile(filename, "r") as zip_ref:
    zip_ref.extractall()

# read csv file
with open("XXXX.csv") as f:
    reader = csv.DictReader(f)
    # note: DictReader already uses the first row as the header, so this next() skips the first data row
    next(reader)
    data = []
    for r in reader:
        data.append(r)
    for i in range(0,len(data)):
        data[i]['api'] = data[i].pop('API_14')
        data[i]['operator_code'] = data[i].pop('Operator_Number')
        data[i]['operator'] = data[i].pop('Operator_Name')
        data[i]['well_name'] = data[i].pop('Well_Name')
        data[i]['well_number'] = data[i].pop('Well_Number')
        data[i]['county_code'] = data[i].pop('API_County')
        data[i]['township'] = data[i].pop('Twp')
        data[i]['range'] = data[i].pop('Rng')
        data[i]['section'] = data[i].pop('Sec')
        data[i]['meridian'] = data[i].pop('PM')
        data[i]['surface_latitude'] = data[i].pop('Lat_Y')
        data[i]['surface_longitude'] = data[i].pop('Long_X')
        data[i]['otc_county'] = data[i].pop('OTC_County')
        data[i]['otc_pun'] = data[i].pop('OTC_PUN')
        data[i]['reservoir_code'] = data[i].pop('Formation_Code')
        data[i]['reservoir'] = data[i].pop('Formation_Name')
        data[i]['production_year'] = data[i].pop('Reporting_Year')
        data[i]['01-01'] = data[i].pop('January_Manuf')
        data[i]['02-01'] =  data[i].pop('February_Manuf')
        data[i]['03-01'] = data[i].pop('March_Manuf')
        data[i]['04-01'] = data[i].pop('April_Manuf')
        data[i]['05-01'] = data[i].pop('May_Manuf')
        data[i]['06-01'] = data[i].pop('June_Manuf')
        data[i]['07-01'] = data[i].pop('July_Manuf')
        data[i]['08-01'] = data[i].pop('August_Manuf')
        data[i]['09-01'] =  data[i].pop('September_Manuf')
        data[i]['10-01'] = data[i].pop('October_Manuf')
        data[i]['11-01'] = data[i].pop('November_Manuf')
        data[i]['12-01'] = data[i].pop('December_Manuf')
        data[i].pop('API_Base')
        data[i].pop('Reporter_Number')
        data[i].pop('Reporter_Name')
        data[i].pop('Q4')
        data[i].pop('Q3')
        data[i].pop('Q2')
        data[i].pop('Q1')

    data_prod = []

    for i in range(0,len(data)):
        # create dataframe
        df = pd.DataFrame(dict(data[i]), index=[0])

        df_unpivot = pd.melt(df, id_vars=['api', 'operator_code','operator', 'well_name',
                             'well_number','county_code', 'township', 'range',
                             'section', 'meridian','surface_latitude',
                             'surface_longitude','otc_county', 'otc_pun','reservoir_code',
                             'reservoir', 'production_year'],
                             var_name='production_month', value_name='gas')
        # convert the unpivoted dataframe to a list of dicts
        data_prod = df_unpivot.to_dict(orient='records')

        # insert keys and values into each list element (dict)
        for i in range(0, len(data_prod)):
            data_prod[i]['gas_unit'] = 'Mcf'
            data_prod[i]['production_date'] = data_prod[i]['production_year'] + '-' + data_prod[i]['production_month']
            data_prod[i]['surface_datum'] = 'NAD 83'
            data_prod[i]['comment'] = 'assumed surface datum'

            data_prod[i].pop('production_year')
            data_prod[i].pop('production_month')
        print(data_prod)
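
As a side note, the same unpivot could likely be done in one pass by loading all of the renamed records into a single DataFrame and melting it once, rather than building a one-row DataFrame per record. A rough sketch reusing data and pd from above; df_all, id_cols and data_prod_all are just illustrative names:

# alternative sketch: melt all records at once instead of one row at a time
df_all = pd.DataFrame(data)

id_cols = ['api', 'operator_code', 'operator', 'well_name', 'well_number',
           'county_code', 'township', 'range', 'section', 'meridian',
           'surface_latitude', 'surface_longitude', 'otc_county', 'otc_pun',
           'reservoir_code', 'reservoir', 'production_year']

df_long = pd.melt(df_all, id_vars=id_cols,
                  var_name='production_month', value_name='gas')
df_long['production_date'] = df_long['production_year'] + '-' + df_long['production_month']
df_long['gas_unit'] = 'Mcf'

data_prod_all = df_long.drop(columns=['production_year', 'production_month']).to_dict(orient='records')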

Python - Start scraping - 5. connect to an FTP site

from ftplib import FTP
import zipfile
import csv

filename = 'XXX.ZIP'
ftp = FTP('ftp.XXXX.com')
ftp.login()
ftp.cwd("/XXX")
gFile = open(filename, "wb")
ftp.retrbinary('RETR XXX.zip', gFile.write)
gFile.close()
ftp.quit()

# Unzip file
with zipfile.ZipFile(filename, "r") as zip_ref:
    zip_ref.extractall()
    
# read csv file
with open("XXX.csv") as f:
    reader = csv.DictReader(f)
    next(reader)
    data = []
    for r in reader:
        data.append(r)
    for i in range(0,len(data)):
        data[i]['api'] = data[i].pop('API_Number')
        data[i]['well_name'] = data[i].pop('Well_Name')
        data[i]['well_number'] = data[i].pop('Well_Number')
        data[i]['operator_code'] = data[i].pop('Operator')
        data[i]['operator'] = data[i].pop('Operator_Name')
        data[i]['well_status'] = data[i].pop('Well_Status')
        data[i]['well_type'] = data[i].pop('Well_Type')
        data[i]['county_code'] = data[i].pop('County')
        data[i]['location_datum'] = data[i].pop('Loc_Type')
        data[i]['township'] = data[i].pop('Twp')
        data[i]['range'] = data[i].pop('Range')
        data[i]['section'] = data[i].pop('Sec')
        data[i]['principle_meridian'] = data[i].pop('PM')
        data[i]['latitude'] = data[i].pop('Lat_Y')
        data[i]['longitude'] = data[i].pop('Long_X')
        data[i]['measured_depth'] = data[i].pop('Measured_TotalDepth')
        data[i]['vertical_depth'] =  data[i].pop('TrueVertical_Depth')
        data[i]['spud_date'] = data[i].pop('Spud')
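
If writing the zip to disk is not wanted (the approach post 7 above leans on), retrbinary can also write into an in-memory buffer instead of a file handle; a small sketch reusing the placeholder host and path from this snippet:

# optional variant: pull the zip into memory instead of writing it to disk
import io

buf = io.BytesIO()
ftp = FTP('ftp.XXXX.com')
ftp.login()
ftp.cwd("/XXX")
ftp.retrbinary('RETR XXX.zip', buf.write)
ftp.quit()
buf.seek(0)

with zipfile.ZipFile(buf, "r") as zip_ref:
    print(zip_ref.namelist())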

Python - Start scraping - 4. connect to an HTTPS website

import http.client
from bs4 import BeautifulSoup
import re
import io
import zipfile
import csv

# Find substring within a string
def find_between(s, first, last):
    try:
        start = s.index(first) + len(first)
        end = s.index(last, start)
        return s[start:end]
    except ValueError:
        return ""

host = 'XXXX'
path_orig = '/XXXX/XXXX/XXXX'
conn = http.client.HTTPSConnection(host)

# get the latest file path
conn.request('GET', path_orig)
r = conn.getresponse()

soup = BeautifulSoup(r,'html.parser')
links = soup.findAll('a')

for link in links:
    prod_link = link.get('href')

    match = re.search(r'pub/Database/Production', str(prod_link))
    if match:
        path = prod_link
        file_name = find_between(prod_link,'/pub/Database/','zip') + 'csv'
# Get the file at the matched path
conn.request('GET', path)
r = conn.getresponse()
 
# read zip file
with zipfile.ZipFile(io.BytesIO(r.read()), "r") as zf:
    zf.extractall()
    
with open(file_name) as f:
    reader = csv.DictReader(f)
    next(reader)
    data = []  
    
    for r in reader:
        data.append(r)
        
    for i in range(0,len(data)):
        data[i]['production_date'] = data[i].pop('ReportPeriod')
        data[i]['operator_current'] = data[i].pop('Operator')
        data[i]['api'] = data[i].pop('API')
        data[i]['wellbore'] = data[i].pop('WellBore')
        data[i]['reservoir'] = data[i].pop('FormationName').strip()
        data[i]['well_type'] = data[i].pop('WellType')
        data[i]['production_days'] = data[i].pop('DaysProd')
        data[i]['oil'] = data[i].pop('Oil')
        data[i]['gas'] = data[i].pop('Gas')
        data[i]['water'] = data[i].pop('Water')      
        data[i].pop('IsAmended')
        data[i].pop('Oper_No')
        data[i].pop('Old_Oper_No')
        data[i].pop('Entity')
        data[i].pop('WellStatus')
    
   # print(data)
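
For what it's worth, find_between returns the text strictly between the two markers, which is why 'csv' gets appended to form the CSV file name; a quick illustration with a made-up path:

# hypothetical illustration of find_between and the file_name it produces
link = '/pub/Database/Production.zip'
print(find_between(link, '/pub/Database/', 'zip'))          # 'Production.'
print(find_between(link, '/pub/Database/', 'zip') + 'csv')  # 'Production.csv'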

Tuesday, May 1, 2018

Python - Start scraping - 3. extract PDF to text/html using pdfminer

For Python 3.6, install pdfminer.six or pdfminer3k with pip (for example, pip install pdfminer.six).

from io import StringIO
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage

def convert_pdf_to_text(fname, pages=None):
    if not pages:
        pagenums = set()
    else:
        pagenums = set(pages)

    output = StringIO()
    manager = PDFResourceManager()
    converter = TextConverter(manager, output, laparams=LAParams())
    interpreter = PDFPageInterpreter(manager, converter)

    infile = open(fname, 'rb')
    for page in PDFPage.get_pages(infile, pagenums):
        interpreter.process_page(page)
    infile.close()
    converter.close()
    text = output.getvalue()
    output.close()
    return text

pdf_text = convert_pdf_to_text("1DD76387.pdf")
print(pdf_text)

----------------------------------------------------------------------------------------------------------------

from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import HTMLConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from io import BytesIO



def convert_pdf_to_html(path):
    rsrcmgr = PDFResourceManager()
    retstr = BytesIO()
    codec = 'utf-8'
    laparams = LAParams()
    device = HTMLConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
    fp = open(path, 'rb')
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    password = ""
    maxpages = 0 #is for all
    caching = True
    pagenos=set()
    for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password,caching=caching, check_extractable=True):
        interpreter.process_page(page)
    fp.close()
    device.close()
    html = retstr.getvalue()
    retstr.close()
    return html

test = convert_pdf_to_html('1DD76387.pdf')
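
The HTML converter reads the result back out of a BytesIO buffer, so it returns bytes; saving it to a file might look like this (the output filename is arbitrary):

# write the extracted HTML to disk; 'wb' because the function returns bytes
with open('1DD76387.html', 'wb') as out:
    out.write(test)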

Friday, April 20, 2018

Python - Start scraping - 2. scrape website data using bs4 and a POST request

# -*- coding: utf-8 -*-
"""
Created on Mon Apr  9 11:51:03 2018

"""
from urllib.parse import urlencode
import requests
from bs4 import BeautifulSoup
from datetime import datetime

# Headers; both required!
headers = {
    'Content-Type': 'application/x-www-form-urlencoded',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36',
}
#-----------------------------------------------------------------------------------------------------------
# Temp parameters: These will be passed in later
#-----------------------------------------------------------------------------------------------------------
# Set up request criteria
data = urlencode({
    # put request parameters here as key/value pairs
})

# Start session and "set query context" by first calling builder page.  This call will acquire session cookie.
session = requests.Session()
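
# The comment above mentions acquiring the session cookie by first calling the
# builder page; that call isn't shown here. A hypothetical placeholder would be
# something like (URL is made up):
# session.get('http://www.xxxxxxxx/builder', headers=headers)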

try:
    # Post query that includes request criteria by calling submit page
    text = session.post('http://www.xxxxxxxx', headers=headers, data=data).text

    # Loop through each record
    soup = BeautifulSoup(text, 'html.parser')
 
    class_type = ['Item','Alternating']

    for c_type in class_type:
        data_list_items = soup.find_all(class_= c_type)
 
        for data_item in data_list_items:
            columns = data_item.find_all("td")
     
            data1 = columns[0].contents[0].replace('\xa0', ' ')
            data2 = columns[1].contents[0].replace('\xa0', ' ')
            data3 = columns[2].contents[0].replace('\xa0', ' ')

        
            # reformat permit date to YYYY-mm-dd
            data1 = datetime.strptime(data1, '%m/%d/%Y').strftime('%Y-%m-%d')
     
            # Build document
            document = {
                'data1': data1,
                'data2': data2,
                'data3': data3,
             
                }
       
            print(document)
            
# Re-raise any failure as a generic "no data" error
except Exception as exc:
    raise Exception('No data found.') from exc

Python - Start scraping - 1. Zip file download

"""
Created on Tue Mar 20 09:23:10 2018
"""

import requests
import zipfile

url = 'https://www.xxxxxxx.zip'
username = 'xxxx'
password = 'xxxx'
file_name = 'xxxxxx.zip'


# Download wellIndex.zip file
print("downloading with requests")
r = requests.get(url, auth=(username, password))
with open(file_name, "wb") as code:
    code.write(r.content)
    
# Unzip files
with zipfile.ZipFile(file_name,"r") as zip_ref:
    zip_ref.extractall()
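
For larger archives, requests can also stream the download instead of holding the whole response body in memory; a possible variant of the same download step, reusing url, username, password and file_name from above:

# streamed variant of the download
r = requests.get(url, auth=(username, password), stream=True)
with open(file_name, "wb") as code:
    for chunk in r.iter_content(chunk_size=8192):
        code.write(chunk)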