How can I write to BigQuery using a runtime value provider in Apache Beam?
EDIT: I got this working with beam.io.WriteToBigQuery and the sink experiment option enabled. I actually already had that; my real problem was that I was trying to "build" the full table reference from two variables (dataset + table) each wrapped in str(). That takes the whole value provider argument description as a string, instead of calling the get() method to fetch just the value.
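Concretely, the difference looked roughly like this (a sketch only; runtime_options and SCHEMA_ADFImpression are the names from the code below, and the single table argument is assumed to already hold the full DATASET.TABLE reference):

# Broken: str() on a runtime ValueProvider produces a description of the provider
# (something like "RuntimeValueProvider(option: dataset, ...)"), not its runtime value.
table_ref = str(runtime_options.dataset) + '.' + str(runtime_options.table)

# Working: pass the ValueProvider itself; with --experiment=use_beam_bq_sink the
# Beam BigQuery sink calls get() on it at runtime.
write_step = beam.io.WriteToBigQuery(
    table=runtime_options.table,  # assumed to carry "DATASET.TABLE" as one value
    schema=SCHEMA_ADFImpression,
    create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
    write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND)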
OP
I am trying to generate a Dataflow template to then call from a GCP Cloud Function. (For reference, my Dataflow job is supposed to read a file containing a bunch of file names, then read all of those files from GCS and write them to BQ.)
Because of this, I need to write it in such a way that I can use runtime value providers to pass the BigQuery dataset/table.
At the bottom of my post is my current code, with some bits omitted that are not relevant to the question.
Pay particular attention to BQ_flexible_writer(beam.DoFn), which is where I am trying to "customise" beam.io.WriteToBigQuery so that it accepts the runtime value providers.
My template generates fine, and when I test-run the pipeline without supplying the runtime variables (relying on the defaults) it succeeds, and I can see the rows being added when I look at the job in the console. However, when I check BigQuery there is no data (I triple-checked that the dataset/table names are correct in the logs). I am not sure where it went, or what logging I could add to understand what is happening to the elements.
Any idea what is going on here? Or any suggestions on how to write to BigQuery with runtime variables? Can I even call beam.io.WriteToBigQuery the way I have included it in my DoFn, or do I have to take the actual code behind beam.io.WriteToBigQuery and work with that directly?
#=========================================================
class BQ_flexible_writer(beam.DoFn):
    def __init__(self, dataset, table):
        self.dataset = dataset
        self.table = table

    def process(self, element):
        dataset_res = self.dataset.get()
        table_res = self.table.get()
        logging.info('Writing to table: {}.{}'.format(dataset_res, table_res))
        beam.io.WriteToBigQuery(
            #dataset= runtime_options.dataset,
            table = str(dataset_res) + '.' + str(table_res),
            schema = SCHEMA_ADFImpression,
            project = str(PROJECT_ID), #options.display_data()['project'],
            create_disposition = beam.io.BigQueryDisposition.CREATE_IF_NEEDED, # create if it does not exist
            write_disposition = beam.io.BigQueryDisposition.WRITE_APPEND # append to existing rows, partitioning
        )
# https://cloud.google.com/dataflow/docs/guides/templates/creating-templates#valueprovider
class FileIterator(beam.DoFn):
    def __init__(self, files_bucket):
        self.files_bucket = files_bucket

    def process(self, element):
        files = pd.read_csv(str(element), header=None).values[0].tolist()
        bucket = self.files_bucket.get()
        files = [str(bucket) + '/' + file for file in files]
        logging.info('Files list is: {}'.format(files))
        return files
#
class OutputValueProviderFn(beam.DoFn):
    def __init__(self, vp):
        self.vp = vp

    def process(self, unused_elm):
        yield self.vp.get()
class RuntimeOptions(PipelineOptions):
    @classmethod
    def _add_argparse_args(cls, parser):
        parser.add_value_provider_argument(
            '--dataset',
            default='EDITED FOR PRIVACY',
            help='BQ dataset to write to',
            type=str)
        parser.add_value_provider_argument(
            '--table',
            default='EDITED FOR PRIVACY',
            required=False,
            help='BQ table to write to',
            type=str)
        parser.add_value_provider_argument(
            '--filename',
            default='EDITED FOR PRIVACY',
            help='Filename of batch file',
            type=str)
        parser.add_value_provider_argument(
            '--batch_bucket',
            default='EDITED FOR PRIVACY',
            help='Bucket for batch file',
            type=str)
        #parser.add_value_provider_argument(
        #    '--bq_schema',
        #    default='gs://dataflow-samples/shakespeare/kinglear.txt',
        #    help='Schema to specify for BQ')
        #parser.add_value_provider_argument(
        #    '--schema_list',
        #    default='gs://dataflow-samples/shakespeare/kinglear.txt',
        #    help='Schema in list for processing')
        parser.add_value_provider_argument(
            '--files_bucket',
            default='EDITED FOR PRIVACY',
            help='Bucket where the raw files are',
            type=str)
        parser.add_value_provider_argument(
            '--complete_batch',
            default='EDITED FOR PRIVACY',
            help='Bucket where the raw files are',
            type=str)
#=========================================================
def run():
    #====================================
    # TODO PUT AS PARAMETERS
    #====================================
    JOB_NAME_READING = 'adf-reading'
    JOB_NAME_PROCESSING = 'adf-'
    job_name = '{}-batch--{}'.format(JOB_NAME_PROCESSING, _millis())

    pipeline_options_batch = PipelineOptions()
    runtime_options = pipeline_options_batch.view_as(RuntimeOptions)

    setup_options = pipeline_options_batch.view_as(SetupOptions)
    setup_options.setup_file = './setup.py'

    google_cloud_options = pipeline_options_batch.view_as(GoogleCloudOptions)
    google_cloud_options.project = PROJECT_ID
    google_cloud_options.job_name = job_name
    google_cloud_options.region = 'europe-west1'
    google_cloud_options.staging_location = GCS_STAGING_LOCATION
    google_cloud_options.temp_location = GCS_TMP_LOCATION

    #pipeline_options_batch.view_as(StandardOptions).runner = 'DirectRunner'

    # If Dataflow runner [BEGIN]
    pipeline_options_batch.view_as(StandardOptions).runner = 'DataflowRunner'
    pipeline_options_batch.view_as(WorkerOptions).autoscaling_algorithm = 'THROUGHPUT_BASED'
    #pipeline_options_batch.view_as(WorkerOptions).machine_type = 'n1-standard-96' #'n1-highmem-32'
    pipeline_options_batch.view_as(WorkerOptions).max_num_workers = 10
    # [END]

    pipeline_options_batch.view_as(SetupOptions).save_main_session = True

    # Needed this in order to pass the table to BQ at runtime
    pipeline_options_batch.view_as(DebugOptions).experiments = ['use_beam_bq_sink']

    with beam.Pipeline(options=pipeline_options_batch) as pipeline_2:
        try:
            final_data = (
                pipeline_2
                | 'Create empty PCollection' >> beam.Create([None])
                | 'Get accepted batch file 1/2:{}'.format(OutputValueProviderFn(runtime_options.complete_batch)) >> beam.ParDo(OutputValueProviderFn(runtime_options.complete_batch))
                | 'Get accepted batch file 2/2:{}'.format(OutputValueProviderFn(runtime_options.complete_batch)) >> beam.ParDo(FileIterator(runtime_options.files_bucket))
                | 'Read all files' >> beam.io.ReadAllFromText(skip_header_lines=1)
                | 'Process all files' >> beam.ParDo(ProcessCSV(), COLUMNS_SCHEMA_0)
                | 'Format all files' >> beam.ParDo(AdfDict())
                #| 'WriteToBigQuery_{}'.format('test' + str(_millis())) >> beam.io.WriteToBigQuery(
                #    #dataset= runtime_options.dataset,
                #    table = str(runtime_options.dataset) + '.' + str(runtime_options.table),
                #    schema = SCHEMA_ADFImpression,
                #    project = pipeline_options_batch.view_as(GoogleCloudOptions).project, #options.display_data()['project'],
                #    create_disposition = beam.io.BigQueryDisposition.CREATE_IF_NEEDED, # create if it does not exist
                #    write_disposition = beam.io.BigQueryDisposition.WRITE_APPEND # append to existing rows, partitioning
                #    )
                | 'WriteToBigQuery' >> beam.ParDo(BQ_flexible_writer(runtime_options.dataset, runtime_options.table))
            )
        except Exception as exception:
            logging.error(exception)
            pass
Please run with the following additional option:
--experiment=use_beam_bq_sink
Otherwise, Dataflow currently overrides the BigQuery sink with a native version that does not support ValueProviders.
Also, note that setting the dataset as a runtime parameter is not supported. Try specifying the table parameter as the whole table reference instead (DATASET.TABLE or PROJECT:DATASET.TABLE).
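Putting those two points together, here is a minimal sketch of how the write step could look. It assumes a single hypothetical --table_spec runtime argument that carries the whole DATASET.TABLE reference, and replaces the question's upstream steps and schema with placeholders:

import apache_beam as beam
from apache_beam.options.pipeline_options import DebugOptions, PipelineOptions


class MyRuntimeOptions(PipelineOptions):
    @classmethod
    def _add_argparse_args(cls, parser):
        # Single runtime argument holding the whole table reference,
        # e.g. "my_dataset.my_table" or "my-project:my_dataset.my_table".
        parser.add_value_provider_argument(
            '--table_spec',
            help='Full BQ table reference: DATASET.TABLE or PROJECT:DATASET.TABLE',
            type=str)


def run():
    options = PipelineOptions()
    # Keep the Beam BigQuery sink (ValueProvider-aware) instead of the native Dataflow one.
    options.view_as(DebugOptions).experiments = ['use_beam_bq_sink']
    runtime_options = options.view_as(MyRuntimeOptions)

    with beam.Pipeline(options=options) as p:
        (
            p
            | 'Create rows' >> beam.Create([{'name': 'example'}])  # placeholder for the real upstream steps
            | 'WriteToBigQuery' >> beam.io.WriteToBigQuery(
                table=runtime_options.table_spec,  # ValueProvider passed directly; resolved at runtime
                schema='name:STRING',              # placeholder for SCHEMA_ADFImpression
                create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
                write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND)
        )

The key difference from the DoFn approach in the question is that beam.io.WriteToBigQuery is applied as a transform in the pipeline graph, so the sink can resolve the ValueProvider when the template is actually executed.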