1. Basic Python packages
import pandas as pd
from pyhive import presto
import matplotlib.pyplot as plt
import sys
import numpy as np
from tqdm import tqdm
import joblib  # sklearn.externals.joblib was removed in newer scikit-learn; import joblib directly
from joblib import Parallel, delayed
import scorecardpy as sc
import toad
2. Building a basic hive-sql query function
# Display settings and Presto connection
pd.set_option('display.max_columns', None)
cursor = presto.connect('172.21.20.47', 9090, 'jobschedule').cursor()
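For readability, the same connection can also be written with keyword arguments. This is an equivalent sketch of the line above, reusing the same host, port, and username values:
# Equivalent keyword-argument form of the Presto connection above
conn = presto.connect(host='172.21.20.47', port=9090, username='jobschedule')
cursor = conn.cursor()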
# Define a SQL execution helper
def read_sql(hql):
    cursor.execute(hql)
    try:
        result = cursor.fetchall()
        # Column names come from the cursor's result-set description
        columns_names = [i[0] for i in cursor.description]
    except Exception as e:
        print(e)
        raise
    data = pd.DataFrame(list(result), columns=columns_names)
    return data
# Calling the helper
hql2 = '''select *
from tmp.tmp_risk_rule_moyao_20200106_aplus_8'''
hql1 = '''select * from tmp.tmp_risk_rule_ABCDG_0510_0520_OPFC2_copy'''
hql3 = '''select * from tmp.tmp_risk_rule_moyao_20200224_tezheng limit 10'''
res1 = read_sql(hql3)
res1.shape
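Since Parallel and delayed are imported in section 1, several queries can also be fetched concurrently. A minimal sketch, assuming each worker opens its own Presto connection (the shared module-level cursor should not be reused across workers); the name read_sql_parallel and n_jobs=3 are illustrative, not part of the original code:
# Each worker opens its own connection so queries can run in parallel safely
def read_sql_parallel(hql, host='172.21.20.47', port=9090, username='jobschedule'):
    cur = presto.connect(host, port, username).cursor()
    cur.execute(hql)
    rows = cur.fetchall()
    cols = [c[0] for c in cur.description]
    return pd.DataFrame(rows, columns=cols)

# Fetch the three queries defined above concurrently
res_list = Parallel(n_jobs=3)(delayed(read_sql_parallel)(q) for q in [hql1, hql2, hql3])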
3. Saving the results
res1.to_csv('/data/wangyongsheng/other/tmp_risk_rule_moyao_20200224_tezhen.csv', index=False, encoding='utf-8-sig')
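To reload the saved file later, a minimal sketch reusing the path above (the .pkl path is an illustrative name, not from the original):
# Read the saved CSV back with the matching encoding
res1 = pd.read_csv('/data/wangyongsheng/other/tmp_risk_rule_moyao_20200224_tezhen.csv', encoding='utf-8-sig')
# For larger intermediate objects, joblib (imported above) can also be used
joblib.dump(res1, '/data/wangyongsheng/other/tmp_risk_rule_moyao_20200224_tezhen.pkl')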