"NVIDIA GPUs work great for complex Optical Character Recognition tasks on poor quality data sets. We use V100 and P100 GPUs on Google Compute Engine to convert millions of handwritten documents, survey drawings, and engineering drawings into machine-readable data. The ability to deploy thousands of Preemptible GPU instances in seconds was vastly superior to the capacity and cost of our previous GPU cloud provider."
— Adam Seabrook, Chief Executive Officer, LeadStage
"V100 GPUs are great for running V-Ray Cloud rendering services. Among all possible hardware configurations that we've tested, V100 ranked #1 on our benchmarking platform. Thanks to V100 GPUs we can use cloud GPUs on-demand on Compute Engine to render our clients' jobs extremely fast." — Boris Simandoff, Director of Engineering, Chaos Group
# Imperatively create a namespace called "test".
kubectl create namespace test
# test.yaml — declarative equivalent of "kubectl create namespace test".
kind: Namespace
apiVersion: v1
metadata:
  name: test
  labels:
    name: test
# Apply with: kubectl apply -f test.yaml
# pod.yaml — a minimal nginx Pod (no namespace set; supplied at apply time).
apiVersion: v1
kind: Pod
metadata:
  name: mypod
  labels:
    name: mypod
spec:
  containers:
    - name: mypod
      image: nginx
# Deploy the Pod into the "test" namespace by overriding the namespace on the command line.
kubectl apply -f pod.yaml --namespace=test
# Same Pod, but with the target namespace declared in the manifest itself,
# so no --namespace flag is needed at apply time.
apiVersion: v1
kind: Pod
metadata:
  name: mypod
  namespace: test
  labels:
    name: mypod
spec:
  containers:
    - name: mypod
      image: nginx
$ kubectl get pods No resources found.
$ kubectl get pods --namespace=test NAME READY STATUS RESTARTS AGE mypod 1/1 Running 0 10s
# kubens (from the kubectx project) switches the current context's default
# namespace, so subsequent kubectl commands target "test" without a flag.
kubens test
$ kubectl get pods NAME READY STATUS RESTARTS AGE mypod 1/1 Running 0 10m
<Service Name>.<Namespace Name>.svc.cluster.local
database.test
database.production
${varname}
@backendoncall policy ${policy.display_name} triggered an incident
{ "incident":{ "incident_id":"0.kmttg2it8kr0", "resource_id":"", "resource_name":"totally-new cassweb1", "started_at":1514931579, "policy_name":"Backend processing utilization too high", "condition_name":"Metric Threshold on Instance (GCE) cassweb1", "url":"https://app.google.stackdriver.com/incidents/0.kmttg2it8kr0?project=totally-new", "documentation":{ "content":"CPU utilization sample. This might affect our backend processing.\u000AFollowing playbook here: https://my.sample.playbook/cassweb1", "mime_type":"text/markdown" }, "state":"open", "ended_at":null, "summary":"CPU utilization for totally-new cassweb1 is above the threshold of 0.8 with a value of 0.994." }, "version":"1.2" }
# Create a GKE cluster whose nodes get the default scopes plus
# read-write access to Compute Engine.
gcloud container clusters create example-cluster \
  --scopes=compute-rw,gke-default
# Demonstrates GKE RBAC: create an IAM service account that may only
# get/watch/list Pods cluster-wide, then verify its (lack of) permissions.
PROJECT_ID=$(gcloud config get-value project)
PRIMARY_ACCOUNT=$(gcloud config get-value account)

# Specify your cluster name.
CLUSTER=cluster-1

# You may have to grant yourself permission to manage roles.
kubectl create clusterrolebinding cluster-admin-binding \
  --clusterrole cluster-admin --user $PRIMARY_ACCOUNT

# Create an IAM service account for the user "gke-pod-reader",
# which we will allow to read pods.
gcloud iam service-accounts create gke-pod-reader \
  --display-name "GKE Pod Reader"
USER_EMAIL=gke-pod-reader@$PROJECT_ID.iam.gserviceaccount.com

# NOTE(review): ClusterRoles are cluster-scoped; the namespace field here is
# ignored by the API server — confirm whether it was intentional.
cat > pod-reader-clusterrole.yaml <<EOF
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: default
  name: pod-reader
rules:
  - apiGroups: [""] # "" indicates the core API group
    resources: ["pods"]
    verbs: ["get", "watch", "list"]
EOF
kubectl create -f pod-reader-clusterrole.yaml

cat > pod-reader-clusterrolebinding.yaml <<EOF
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: pod-reader-global
subjects:
  - kind: User
    name: $USER_EMAIL
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: pod-reader
  apiGroup: rbac.authorization.k8s.io
EOF
kubectl create -f pod-reader-clusterrolebinding.yaml

# Check the permissions of our Pod Reader user.
gcloud iam service-accounts keys create pod-reader-key.json \
  --iam-account $USER_EMAIL
gcloud container clusters get-credentials $CLUSTER
gcloud auth activate-service-account $USER_EMAIL \
  --key-file=pod-reader-key.json

# Our user can get/list all pods in the cluster.
kubectl get pods --all-namespaces

# But they can't see the deployments, services, or nodes.
kubectl get deployments --all-namespaces
kubectl get services --all-namespaces
kubectl get nodes

# Reset gcloud and kubectl to your main user.
gcloud config set account $PRIMARY_ACCOUNT
gcloud container clusters get-credentials $CLUSTER
# Opt this gcloud installation out of the Container Engine v1 API
# (presumably to use the v1beta1 API surface — verify against current gcloud docs,
# as this property has been removed in newer SDK releases).
gcloud config set container/use_v1_api false
# Minimal "onbuild" variant: the base image's ONBUILD triggers copy the app
# and run npm install automatically. (onbuild images are deprecated upstream.)
FROM node:onbuild
EXPOSE 8080
# Explicit alpine build: copy package.json first so the npm install layer
# is cached independently of source changes.
FROM node:alpine
WORKDIR /app
COPY package.json /app/package.json
RUN npm install --production
COPY server.js /app/server.js
EXPOSE 8080
CMD npm start
# Go "onbuild" variant: base-image ONBUILD triggers copy and build the app.
# (onbuild images are deprecated upstream.)
FROM golang:onbuild
EXPOSE 8080
# Single-stage alpine build: the final image still carries the full Go toolchain.
FROM golang:alpine
WORKDIR /app
ADD . /app
RUN cd /app && go build -o goapp
EXPOSE 8080
ENTRYPOINT ./goapp
# Multistage build: compile in a golang image, then copy only the binary
# (plus CA certificates) into a small plain-alpine runtime image.
FROM golang:alpine AS build-env
WORKDIR /app
ADD . /app
RUN cd /app && go build -o goapp

FROM alpine
RUN apk update && \
    apk add ca-certificates && \
    update-ca-certificates && \
    rm -rf /var/cache/apk/*
WORKDIR /app
COPY --from=build-env /app/goapp /app
EXPOSE 8080
ENTRYPOINT ./goapp
Go Onbuild: 35 Seconds Go Multistage: 23 Seconds
Go Onbuild: 15 Seconds Go Multistage: 14 Seconds
Go Onbuild: 26 Seconds Go Multistage: 6 Seconds
Go Onbuild: 25 Seconds Go Multistage: 20 Seconds
Go Onbuild: 52 seconds Go Multistage: 6 seconds
Go Onbuild: 54 seconds Go Multistage: 28 seconds
Go Onbuild: 48 seconds Go Multistage: 16 seconds
Demonstrate your proficiency in designing, building, and managing solutions on Google Cloud Platform.