"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
"""
import builtins
import collections.abc
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.internal.enum_type_wrapper
import google.protobuf.message
import sys
import typing
if sys.version_info >= (3, 10):
import typing as typing_extensions
else:
import typing_extensions
# File-level descriptor for the compiled proto module.
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
class _Component:
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _ComponentEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_Component.ValueType], builtins.type):  # noqa: F821
    """Metaclass exposing the ``Component`` enum values as class attributes."""

    DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
    Component_Unknown: _Component.ValueType  # 0
    Component_Config_Validator: _Component.ValueType  # 1
    Component_Config_Populator: _Component.ValueType  # 2
    Component_Data_Preprocessor: _Component.ValueType  # 3
    Component_Subgraph_Sampler: _Component.ValueType  # 4
    Component_Split_Generator: _Component.ValueType  # 5
    Component_Trainer: _Component.ValueType  # 6
    Component_Inferencer: _Component.ValueType  # 7
class Component(_Component, metaclass=_ComponentEnumTypeWrapper):
    """Enum for pipeline components"""

# Module-level aliases for the enum values, as emitted by mypy-protobuf.
Component_Unknown: Component.ValueType  # 0
Component_Config_Validator: Component.ValueType  # 1
Component_Config_Populator: Component.ValueType  # 2
Component_Data_Preprocessor: Component.ValueType  # 3
Component_Subgraph_Sampler: Component.ValueType  # 4
Component_Split_Generator: Component.ValueType  # 5
Component_Trainer: Component.ValueType  # 6
Component_Inferencer: Component.ValueType  # 7
global___Component = Component
class SparkResourceConfig(google.protobuf.message.Message):
    """Configuration for Spark Components"""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    MACHINE_TYPE_FIELD_NUMBER: builtins.int
    NUM_LOCAL_SSDS_FIELD_NUMBER: builtins.int
    NUM_REPLICAS_FIELD_NUMBER: builtins.int
    machine_type: builtins.str
    """Machine type for Spark Resource"""
    num_local_ssds: builtins.int
    """Number of local SSDs"""
    num_replicas: builtins.int
    """Num workers for Spark Resource"""
    def __init__(
        self,
        *,
        machine_type: builtins.str = ...,
        num_local_ssds: builtins.int = ...,
        num_replicas: builtins.int = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["machine_type", b"machine_type", "num_local_ssds", b"num_local_ssds", "num_replicas", b"num_replicas"]) -> None: ...

global___SparkResourceConfig = SparkResourceConfig
class DataflowResourceConfig(google.protobuf.message.Message):
    """Configuration for Dataflow Components"""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    NUM_WORKERS_FIELD_NUMBER: builtins.int
    MAX_NUM_WORKERS_FIELD_NUMBER: builtins.int
    MACHINE_TYPE_FIELD_NUMBER: builtins.int
    DISK_SIZE_GB_FIELD_NUMBER: builtins.int
    num_workers: builtins.int
    """Number of workers for Dataflow resources"""
    max_num_workers: builtins.int
    """Maximum number of workers for Dataflow resources"""
    machine_type: builtins.str
    """Machine type for Dataflow resources"""
    disk_size_gb: builtins.int
    """Disk size in GB for Dataflow resources"""
    def __init__(
        self,
        *,
        num_workers: builtins.int = ...,
        max_num_workers: builtins.int = ...,
        machine_type: builtins.str = ...,
        disk_size_gb: builtins.int = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["disk_size_gb", b"disk_size_gb", "machine_type", b"machine_type", "max_num_workers", b"max_num_workers", "num_workers", b"num_workers"]) -> None: ...

global___DataflowResourceConfig = DataflowResourceConfig
class DataPreprocessorConfig(google.protobuf.message.Message):
    """Configuration for Data Preprocessor"""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    EDGE_PREPROCESSOR_CONFIG_FIELD_NUMBER: builtins.int
    NODE_PREPROCESSOR_CONFIG_FIELD_NUMBER: builtins.int
    @property
    def edge_preprocessor_config(self) -> global___DataflowResourceConfig: ...
    @property
    def node_preprocessor_config(self) -> global___DataflowResourceConfig: ...
    def __init__(
        self,
        *,
        edge_preprocessor_config: global___DataflowResourceConfig | None = ...,
        node_preprocessor_config: global___DataflowResourceConfig | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["edge_preprocessor_config", b"edge_preprocessor_config", "node_preprocessor_config", b"node_preprocessor_config"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["edge_preprocessor_config", b"edge_preprocessor_config", "node_preprocessor_config", b"node_preprocessor_config"]) -> None: ...

global___DataPreprocessorConfig = DataPreprocessorConfig
class VertexAiTrainerConfig(google.protobuf.message.Message):
    """(deprecated)
    Configuration for Vertex AI training resources
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    MACHINE_TYPE_FIELD_NUMBER: builtins.int
    GPU_TYPE_FIELD_NUMBER: builtins.int
    GPU_LIMIT_FIELD_NUMBER: builtins.int
    NUM_REPLICAS_FIELD_NUMBER: builtins.int
    machine_type: builtins.str
    """Machine type for training job"""
    gpu_type: builtins.str
    """GPU type for training job. Must be set to 'ACCELERATOR_TYPE_UNSPECIFIED' for cpu training."""
    gpu_limit: builtins.int
    """GPU limit for training job. Must be set to 0 for cpu training."""
    num_replicas: builtins.int
    """Num workers for training job"""
    def __init__(
        self,
        *,
        machine_type: builtins.str = ...,
        gpu_type: builtins.str = ...,
        gpu_limit: builtins.int = ...,
        num_replicas: builtins.int = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["gpu_limit", b"gpu_limit", "gpu_type", b"gpu_type", "machine_type", b"machine_type", "num_replicas", b"num_replicas"]) -> None: ...

global___VertexAiTrainerConfig = VertexAiTrainerConfig
class KFPTrainerConfig(google.protobuf.message.Message):
    """(deprecated)
    Configuration for KFP training resources
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    CPU_REQUEST_FIELD_NUMBER: builtins.int
    MEMORY_REQUEST_FIELD_NUMBER: builtins.int
    GPU_TYPE_FIELD_NUMBER: builtins.int
    GPU_LIMIT_FIELD_NUMBER: builtins.int
    NUM_REPLICAS_FIELD_NUMBER: builtins.int
    cpu_request: builtins.str
    """Num CPU requested for training job (str) which can be a number or a number followed by "m", which means 1/1000"""
    memory_request: builtins.str
    """Amount of Memory requested for training job (str) can either be a number or a number followed by one of "Ei", "Pi", "Ti", "Gi", "Mi", "Ki"."""
    gpu_type: builtins.str
    """GPU type for training job. Must be set to 'ACCELERATOR_TYPE_UNSPECIFIED' for cpu training."""
    gpu_limit: builtins.int
    """GPU limit for training job. Must be set to 0 for cpu training."""
    num_replicas: builtins.int
    """Number of replicas for training job"""
    def __init__(
        self,
        *,
        cpu_request: builtins.str = ...,
        memory_request: builtins.str = ...,
        gpu_type: builtins.str = ...,
        gpu_limit: builtins.int = ...,
        num_replicas: builtins.int = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["cpu_request", b"cpu_request", "gpu_limit", b"gpu_limit", "gpu_type", b"gpu_type", "memory_request", b"memory_request", "num_replicas", b"num_replicas"]) -> None: ...

global___KFPTrainerConfig = KFPTrainerConfig
class LocalTrainerConfig(google.protobuf.message.Message):
    """(deprecated)
    Configuration for Local Training
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    NUM_WORKERS_FIELD_NUMBER: builtins.int
    num_workers: builtins.int
    def __init__(
        self,
        *,
        num_workers: builtins.int = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["num_workers", b"num_workers"]) -> None: ...

global___LocalTrainerConfig = LocalTrainerConfig
class VertexAiResourceConfig(google.protobuf.message.Message):
    """Configuration for Vertex AI resources"""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    MACHINE_TYPE_FIELD_NUMBER: builtins.int
    GPU_TYPE_FIELD_NUMBER: builtins.int
    GPU_LIMIT_FIELD_NUMBER: builtins.int
    NUM_REPLICAS_FIELD_NUMBER: builtins.int
    TIMEOUT_FIELD_NUMBER: builtins.int
    GCP_REGION_OVERRIDE_FIELD_NUMBER: builtins.int
    machine_type: builtins.str
    """Machine type for job"""
    gpu_type: builtins.str
    """GPU type for job. Must be set to 'ACCELERATOR_TYPE_UNSPECIFIED' for cpu."""
    gpu_limit: builtins.int
    """GPU limit for job. Must be set to 0 for cpu."""
    num_replicas: builtins.int
    """Num workers for job"""
    timeout: builtins.int
    """Timeout in seconds for the job. If unset or zero, will use the default @ google.cloud.aiplatform.CustomJob, which is 7 days:
    https://github.com/googleapis/python-aiplatform/blob/58fbabdeeefd1ccf1a9d0c22eeb5606aeb9c2266/google/cloud/aiplatform/jobs.py#L2252-L2253
    """
    gcp_region_override: builtins.str
    """Region override
    If provided, then the Vertex AI Job will be launched in the provided region.
    Otherwise, will launch jobs in the region specified at CommonComputeConfig.region
    ex: "us-west1"
    NOTE: If set, then there may be data egress costs from CommonComputeConfig.region -> gcp_region_override
    """
    def __init__(
        self,
        *,
        machine_type: builtins.str = ...,
        gpu_type: builtins.str = ...,
        gpu_limit: builtins.int = ...,
        num_replicas: builtins.int = ...,
        timeout: builtins.int = ...,
        gcp_region_override: builtins.str = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["gcp_region_override", b"gcp_region_override", "gpu_limit", b"gpu_limit", "gpu_type", b"gpu_type", "machine_type", b"machine_type", "num_replicas", b"num_replicas", "timeout", b"timeout"]) -> None: ...

global___VertexAiResourceConfig = VertexAiResourceConfig
class KFPResourceConfig(google.protobuf.message.Message):
    """Configuration for KFP job resources"""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    CPU_REQUEST_FIELD_NUMBER: builtins.int
    MEMORY_REQUEST_FIELD_NUMBER: builtins.int
    GPU_TYPE_FIELD_NUMBER: builtins.int
    GPU_LIMIT_FIELD_NUMBER: builtins.int
    NUM_REPLICAS_FIELD_NUMBER: builtins.int
    cpu_request: builtins.str
    """Num CPU requested for job (str) which can be a number or a number followed by "m", which means 1/1000"""
    memory_request: builtins.str
    """Amount of Memory requested for job (str) can either be a number or a number followed by one of "Ei", "Pi", "Ti", "Gi", "Mi", "Ki"."""
    gpu_type: builtins.str
    """GPU type for job. Must be set to 'ACCELERATOR_TYPE_UNSPECIFIED' for cpu."""
    gpu_limit: builtins.int
    """GPU limit for job. Must be set to 0 for cpu."""
    num_replicas: builtins.int
    """Number of replicas for job"""
    def __init__(
        self,
        *,
        cpu_request: builtins.str = ...,
        memory_request: builtins.str = ...,
        gpu_type: builtins.str = ...,
        gpu_limit: builtins.int = ...,
        num_replicas: builtins.int = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["cpu_request", b"cpu_request", "gpu_limit", b"gpu_limit", "gpu_type", b"gpu_type", "memory_request", b"memory_request", "num_replicas", b"num_replicas"]) -> None: ...

global___KFPResourceConfig = KFPResourceConfig
class LocalResourceConfig(google.protobuf.message.Message):
    """Configuration for Local Jobs"""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    NUM_WORKERS_FIELD_NUMBER: builtins.int
    num_workers: builtins.int
    def __init__(
        self,
        *,
        num_workers: builtins.int = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["num_workers", b"num_workers"]) -> None: ...

global___LocalResourceConfig = LocalResourceConfig
class VertexAiGraphStoreConfig(google.protobuf.message.Message):
    """Configuration for launching Vertex AI clusters with both graph store and compute pools
    Under the hood, this uses Vertex AI Multi-Pool Training
    See https://cloud.google.com/vertex-ai/docs/training/distributed-training for more info.
    This cluster setup should be used when you want store your graph on separate machines from the compute machines
    e.g. you can get lots of big memory machines and separate gpu machines individually,
    but getting lots of gpu machines with lots of memory is challenging.
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    GRAPH_STORE_POOL_FIELD_NUMBER: builtins.int
    COMPUTE_POOL_FIELD_NUMBER: builtins.int
    @property
    def graph_store_pool(self) -> global___VertexAiResourceConfig: ...
    @property
    def compute_pool(self) -> global___VertexAiResourceConfig: ...
    def __init__(
        self,
        *,
        graph_store_pool: global___VertexAiResourceConfig | None = ...,
        compute_pool: global___VertexAiResourceConfig | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["compute_pool", b"compute_pool", "graph_store_pool", b"graph_store_pool"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["compute_pool", b"compute_pool", "graph_store_pool", b"graph_store_pool"]) -> None: ...

global___VertexAiGraphStoreConfig = VertexAiGraphStoreConfig
class DistributedTrainerConfig(google.protobuf.message.Message):
    """(deprecated)
    Configuration for distributed training resources
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    VERTEX_AI_TRAINER_CONFIG_FIELD_NUMBER: builtins.int
    KFP_TRAINER_CONFIG_FIELD_NUMBER: builtins.int
    LOCAL_TRAINER_CONFIG_FIELD_NUMBER: builtins.int
    @property
    def vertex_ai_trainer_config(self) -> global___VertexAiTrainerConfig: ...
    @property
    def kfp_trainer_config(self) -> global___KFPTrainerConfig: ...
    @property
    def local_trainer_config(self) -> global___LocalTrainerConfig: ...
    def __init__(
        self,
        *,
        vertex_ai_trainer_config: global___VertexAiTrainerConfig | None = ...,
        kfp_trainer_config: global___KFPTrainerConfig | None = ...,
        local_trainer_config: global___LocalTrainerConfig | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["kfp_trainer_config", b"kfp_trainer_config", "local_trainer_config", b"local_trainer_config", "trainer_config", b"trainer_config", "vertex_ai_trainer_config", b"vertex_ai_trainer_config"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["kfp_trainer_config", b"kfp_trainer_config", "local_trainer_config", b"local_trainer_config", "trainer_config", b"trainer_config", "vertex_ai_trainer_config", b"vertex_ai_trainer_config"]) -> None: ...
    def WhichOneof(self, oneof_group: typing_extensions.Literal["trainer_config", b"trainer_config"]) -> typing_extensions.Literal["vertex_ai_trainer_config", "kfp_trainer_config", "local_trainer_config"] | None: ...

global___DistributedTrainerConfig = DistributedTrainerConfig
class TrainerResourceConfig(google.protobuf.message.Message):
    """Configuration for training resources"""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    VERTEX_AI_TRAINER_CONFIG_FIELD_NUMBER: builtins.int
    KFP_TRAINER_CONFIG_FIELD_NUMBER: builtins.int
    LOCAL_TRAINER_CONFIG_FIELD_NUMBER: builtins.int
    VERTEX_AI_GRAPH_STORE_TRAINER_CONFIG_FIELD_NUMBER: builtins.int
    @property
    def vertex_ai_trainer_config(self) -> global___VertexAiResourceConfig: ...
    @property
    def kfp_trainer_config(self) -> global___KFPResourceConfig: ...
    @property
    def local_trainer_config(self) -> global___LocalResourceConfig: ...
    @property
    def vertex_ai_graph_store_trainer_config(self) -> global___VertexAiGraphStoreConfig: ...
    def __init__(
        self,
        *,
        vertex_ai_trainer_config: global___VertexAiResourceConfig | None = ...,
        kfp_trainer_config: global___KFPResourceConfig | None = ...,
        local_trainer_config: global___LocalResourceConfig | None = ...,
        vertex_ai_graph_store_trainer_config: global___VertexAiGraphStoreConfig | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["kfp_trainer_config", b"kfp_trainer_config", "local_trainer_config", b"local_trainer_config", "trainer_config", b"trainer_config", "vertex_ai_graph_store_trainer_config", b"vertex_ai_graph_store_trainer_config", "vertex_ai_trainer_config", b"vertex_ai_trainer_config"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["kfp_trainer_config", b"kfp_trainer_config", "local_trainer_config", b"local_trainer_config", "trainer_config", b"trainer_config", "vertex_ai_graph_store_trainer_config", b"vertex_ai_graph_store_trainer_config", "vertex_ai_trainer_config", b"vertex_ai_trainer_config"]) -> None: ...
    def WhichOneof(self, oneof_group: typing_extensions.Literal["trainer_config", b"trainer_config"]) -> typing_extensions.Literal["vertex_ai_trainer_config", "kfp_trainer_config", "local_trainer_config", "vertex_ai_graph_store_trainer_config"] | None: ...

global___TrainerResourceConfig = TrainerResourceConfig
class InferencerResourceConfig(google.protobuf.message.Message):
    """Configuration for distributed inference resources"""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    VERTEX_AI_INFERENCER_CONFIG_FIELD_NUMBER: builtins.int
    DATAFLOW_INFERENCER_CONFIG_FIELD_NUMBER: builtins.int
    LOCAL_INFERENCER_CONFIG_FIELD_NUMBER: builtins.int
    VERTEX_AI_GRAPH_STORE_INFERENCER_CONFIG_FIELD_NUMBER: builtins.int
    @property
    def vertex_ai_inferencer_config(self) -> global___VertexAiResourceConfig: ...
    @property
    def dataflow_inferencer_config(self) -> global___DataflowResourceConfig: ...
    @property
    def local_inferencer_config(self) -> global___LocalResourceConfig: ...
    @property
    def vertex_ai_graph_store_inferencer_config(self) -> global___VertexAiGraphStoreConfig: ...
    def __init__(
        self,
        *,
        vertex_ai_inferencer_config: global___VertexAiResourceConfig | None = ...,
        dataflow_inferencer_config: global___DataflowResourceConfig | None = ...,
        local_inferencer_config: global___LocalResourceConfig | None = ...,
        vertex_ai_graph_store_inferencer_config: global___VertexAiGraphStoreConfig | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["dataflow_inferencer_config", b"dataflow_inferencer_config", "inferencer_config", b"inferencer_config", "local_inferencer_config", b"local_inferencer_config", "vertex_ai_graph_store_inferencer_config", b"vertex_ai_graph_store_inferencer_config", "vertex_ai_inferencer_config", b"vertex_ai_inferencer_config"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["dataflow_inferencer_config", b"dataflow_inferencer_config", "inferencer_config", b"inferencer_config", "local_inferencer_config", b"local_inferencer_config", "vertex_ai_graph_store_inferencer_config", b"vertex_ai_graph_store_inferencer_config", "vertex_ai_inferencer_config", b"vertex_ai_inferencer_config"]) -> None: ...
    def WhichOneof(self, oneof_group: typing_extensions.Literal["inferencer_config", b"inferencer_config"]) -> typing_extensions.Literal["vertex_ai_inferencer_config", "dataflow_inferencer_config", "local_inferencer_config", "vertex_ai_graph_store_inferencer_config"] | None: ...

global___InferencerResourceConfig = InferencerResourceConfig
class SharedResourceConfig(google.protobuf.message.Message):
    """Shared resources configuration"""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    class CommonComputeConfig(google.protobuf.message.Message):
        DESCRIPTOR: google.protobuf.descriptor.Descriptor

        PROJECT_FIELD_NUMBER: builtins.int
        REGION_FIELD_NUMBER: builtins.int
        TEMP_ASSETS_BUCKET_FIELD_NUMBER: builtins.int
        TEMP_REGIONAL_ASSETS_BUCKET_FIELD_NUMBER: builtins.int
        PERM_ASSETS_BUCKET_FIELD_NUMBER: builtins.int
        TEMP_ASSETS_BQ_DATASET_NAME_FIELD_NUMBER: builtins.int
        EMBEDDING_BQ_DATASET_NAME_FIELD_NUMBER: builtins.int
        GCP_SERVICE_ACCOUNT_EMAIL_FIELD_NUMBER: builtins.int
        DATAFLOW_RUNNER_FIELD_NUMBER: builtins.int
        project: builtins.str
        """GCP Project"""
        region: builtins.str
        """GCP Region where compute is to be scheduled"""
        temp_assets_bucket: builtins.str
        """GCS Bucket for where temporary assets are to be stored"""
        temp_regional_assets_bucket: builtins.str
        """Regional GCS Bucket used to store temporary assets"""
        perm_assets_bucket: builtins.str
        """Regional GCS Bucket that will store permanent assets like Trained Model"""
        temp_assets_bq_dataset_name: builtins.str
        """Path to BQ dataset used to store temporary assets"""
        embedding_bq_dataset_name: builtins.str
        """Path to BQ Dataset used to persist generated embeddings and predictions"""
        gcp_service_account_email: builtins.str
        """The GCP service account email being used to schedule compute on GCP"""
        dataflow_runner: builtins.str
        """The runner to use for Dataflow i.e DirectRunner or DataflowRunner"""
        def __init__(
            self,
            *,
            project: builtins.str = ...,
            region: builtins.str = ...,
            temp_assets_bucket: builtins.str = ...,
            temp_regional_assets_bucket: builtins.str = ...,
            perm_assets_bucket: builtins.str = ...,
            temp_assets_bq_dataset_name: builtins.str = ...,
            embedding_bq_dataset_name: builtins.str = ...,
            gcp_service_account_email: builtins.str = ...,
            dataflow_runner: builtins.str = ...,
        ) -> None: ...
        def ClearField(self, field_name: typing_extensions.Literal["dataflow_runner", b"dataflow_runner", "embedding_bq_dataset_name", b"embedding_bq_dataset_name", "gcp_service_account_email", b"gcp_service_account_email", "perm_assets_bucket", b"perm_assets_bucket", "project", b"project", "region", b"region", "temp_assets_bq_dataset_name", b"temp_assets_bq_dataset_name", "temp_assets_bucket", b"temp_assets_bucket", "temp_regional_assets_bucket", b"temp_regional_assets_bucket"]) -> None: ...

    class ResourceLabelsEntry(google.protobuf.message.Message):
        DESCRIPTOR: google.protobuf.descriptor.Descriptor

        KEY_FIELD_NUMBER: builtins.int
        VALUE_FIELD_NUMBER: builtins.int
        key: builtins.str
        value: builtins.str
        def __init__(
            self,
            *,
            key: builtins.str = ...,
            value: builtins.str = ...,
        ) -> None: ...
        def ClearField(self, field_name: typing_extensions.Literal["key", b"key", "value", b"value"]) -> None: ...

    RESOURCE_LABELS_FIELD_NUMBER: builtins.int
    COMMON_COMPUTE_CONFIG_FIELD_NUMBER: builtins.int
    @property
    def resource_labels(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.str]: ...
    @property
    def common_compute_config(self) -> global___SharedResourceConfig.CommonComputeConfig: ...
    def __init__(
        self,
        *,
        resource_labels: collections.abc.Mapping[builtins.str, builtins.str] | None = ...,
        common_compute_config: global___SharedResourceConfig.CommonComputeConfig | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["common_compute_config", b"common_compute_config"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["common_compute_config", b"common_compute_config", "resource_labels", b"resource_labels"]) -> None: ...

global___SharedResourceConfig = SharedResourceConfig
class GiglResourceConfig(google.protobuf.message.Message):
    """GiGL resources configuration"""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    SHARED_RESOURCE_CONFIG_URI_FIELD_NUMBER: builtins.int
    SHARED_RESOURCE_CONFIG_FIELD_NUMBER: builtins.int
    PREPROCESSOR_CONFIG_FIELD_NUMBER: builtins.int
    SUBGRAPH_SAMPLER_CONFIG_FIELD_NUMBER: builtins.int
    SPLIT_GENERATOR_CONFIG_FIELD_NUMBER: builtins.int
    TRAINER_CONFIG_FIELD_NUMBER: builtins.int
    INFERENCER_CONFIG_FIELD_NUMBER: builtins.int
    TRAINER_RESOURCE_CONFIG_FIELD_NUMBER: builtins.int
    INFERENCER_RESOURCE_CONFIG_FIELD_NUMBER: builtins.int
    shared_resource_config_uri: builtins.str
    @property
    def shared_resource_config(self) -> global___SharedResourceConfig: ...
    @property
    def preprocessor_config(self) -> global___DataPreprocessorConfig:
        """Configuration for Data Preprocessor"""
    @property
    def subgraph_sampler_config(self) -> global___SparkResourceConfig:
        """Configuration for Spark subgraph sampler"""
    @property
    def split_generator_config(self) -> global___SparkResourceConfig:
        """Configuration for Spark split generator"""
    @property
    def trainer_config(self) -> global___DistributedTrainerConfig:
        """(deprecated)
        Configuration for trainer
        """
    @property
    def inferencer_config(self) -> global___DataflowResourceConfig:
        """(deprecated)
        Configuration for inferencer
        """
    @property
    def trainer_resource_config(self) -> global___TrainerResourceConfig:
        """Configuration for distributed trainer"""
    @property
    def inferencer_resource_config(self) -> global___InferencerResourceConfig:
        """Configuration for distributed inferencer"""
    def __init__(
        self,
        *,
        shared_resource_config_uri: builtins.str = ...,
        shared_resource_config: global___SharedResourceConfig | None = ...,
        preprocessor_config: global___DataPreprocessorConfig | None = ...,
        subgraph_sampler_config: global___SparkResourceConfig | None = ...,
        split_generator_config: global___SparkResourceConfig | None = ...,
        trainer_config: global___DistributedTrainerConfig | None = ...,
        inferencer_config: global___DataflowResourceConfig | None = ...,
        trainer_resource_config: global___TrainerResourceConfig | None = ...,
        inferencer_resource_config: global___InferencerResourceConfig | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["inferencer_config", b"inferencer_config", "inferencer_resource_config", b"inferencer_resource_config", "preprocessor_config", b"preprocessor_config", "shared_resource", b"shared_resource", "shared_resource_config", b"shared_resource_config", "shared_resource_config_uri", b"shared_resource_config_uri", "split_generator_config", b"split_generator_config", "subgraph_sampler_config", b"subgraph_sampler_config", "trainer_config", b"trainer_config", "trainer_resource_config", b"trainer_resource_config"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["inferencer_config", b"inferencer_config", "inferencer_resource_config", b"inferencer_resource_config", "preprocessor_config", b"preprocessor_config", "shared_resource", b"shared_resource", "shared_resource_config", b"shared_resource_config", "shared_resource_config_uri", b"shared_resource_config_uri", "split_generator_config", b"split_generator_config", "subgraph_sampler_config", b"subgraph_sampler_config", "trainer_config", b"trainer_config", "trainer_resource_config", b"trainer_resource_config"]) -> None: ...
    def WhichOneof(self, oneof_group: typing_extensions.Literal["shared_resource", b"shared_resource"]) -> typing_extensions.Literal["shared_resource_config_uri", "shared_resource_config"] | None: ...

global___GiglResourceConfig = GiglResourceConfig