text
stringlengths 15
267k
|
|---|
# -*- coding: utf-8 -*-
import logging
import unreal
import inspect
import types
import Utilities
from collections import Counter
class attr_detail(object):
    """Inspection record for a single attribute of an Unreal object.

    Classifies the attribute as builtin callable, other callable, editor
    property, or plain property, and (where safe to do so) calls or reads it
    to capture a display value.
    """
    def __init__(self, obj, name: str):
        self.name = name
        attr = None
        self.bCallable = None
        self.bCallable_builtin = None
        try:
            if hasattr(obj, name):
                attr = getattr(obj, name)
                self.bCallable = callable(attr)
                self.bCallable_builtin = inspect.isbuiltin(attr)
        except Exception as e:
            unreal.log(str(e))
        self.bProperty = not self.bCallable
        self.result = None
        self.param_str = None
        self.bEditorProperty = None
        self.return_type_str = None
        self.doc_str = None
        self.property_rw = None
        if self.bCallable:
            self.return_type_str = ""
            if self.bCallable_builtin:
                if hasattr(attr, '__doc__'):
                    docForDisplay, paramStr = _simplifyDoc(attr.__doc__)
                    try:
                        sig = inspect.getfullargspec(getattr(obj, self.name))
                        args = sig.args
                        argCount = len(args)
                        if "self" in args:
                            argCount -= 1
                    except TypeError:
                        # Many builtins have no introspectable signature.
                        argCount = -1
                    if "-> " in docForDisplay:
                        self.return_type_str = docForDisplay[docForDisplay.find(')') + 1:]
                    else:
                        self.doc_str = docForDisplay[docForDisplay.find(')') + 1:]
                    if argCount == 0 or (argCount == -1 and (paramStr == '' or paramStr == 'self')):
                        # Method with no params
                        if '-> None' not in docForDisplay or self.name in ["__reduce__", "_post_init"]:
                            try:
                                if name == "get_actor_time_dilation" and isinstance(obj, unreal.Object):
                                    # call get_actor_time_dilation will crash engine if actor
                                    # is get from CDO and has no world.
                                    if obj.get_world():
                                        self.result = attr.__call__()
                                    else:
                                        self.result = "skip call, world == None."
                                else:
                                    self.result = attr.__call__()
                            except Exception:
                                self.result = "skip call.."
                        else:
                            print(f"docForDisplay: {docForDisplay}, self.name: {self.name}")
                            self.result = "skip call."
                    else:
                        self.param_str = paramStr
                        self.result = ""
                else:
                    logging.error("Can't find p")
            elif self.bCallable_other:
                if hasattr(attr, '__doc__'):
                    if isinstance(attr.__doc__, str):
                        docForDisplay, paramStr = _simplifyDoc(attr.__doc__)
                if name in ["__str__", "__hash__", "__repr__", "__len__"]:
                    try:
                        self.result = "{}".format(attr.__call__())
                    except Exception:
                        self.result = "skip call."
                else:
                    self.result = getattr(obj, name)
    def post(self, obj):
        """Late pass: fetch plain-property values not captured in __init__."""
        if self.bOtherProperty and not self.result:
            try:
                self.result = getattr(obj, self.name)
            except Exception:
                self.result = "skip call..."
    def apply_editor_property(self, obj, type_, rws, descript):
        """Mark this attribute as an Unreal editor property and read its value."""
        self.bEditorProperty = True
        self.property_rw = "[{}]".format(rws)
        try:
            # BUG FIX: previously used eval() on a formatted string; a direct
            # call is equivalent and safer.
            self.result = obj.get_editor_property(self.name)
        except Exception:
            self.result = "Invalid"
    def __str__(self):
        s = f"Attr: {self.name} paramStr: {self.param_str} desc: {self.return_type_str} result: {self.result}"
        if self.bProperty:
            s += ", Property"
        if self.bEditorProperty:
            # typo fix: was ", Eidtor Property"
            s += ", Editor Property"
        if self.bOtherProperty:
            s += ", Other Property "
        if self.bCallable:
            s += ", Callable"
        if self.bCallable_builtin:
            s += ", Callable_builtin"
        if self.bCallable_other:
            s += ", bCallable_other"
        if self.bHasParamFunction:
            s += ", bHasParamFunction"
        return s
    def check(self):
        """Sanity check: flag attributes classified into two categories at once."""
        counter = Counter([self.bOtherProperty, self.bEditorProperty, self.bCallable_other, self.bCallable_builtin])
        if counter[True] == 2:
            unreal.log_error(f"{self.name}: {self.bEditorProperty}, {self.bOtherProperty} {self.bCallable_builtin} {self.bCallable_other}")
    @property
    def bOtherProperty(self):
        # A plain property that is not exposed as an editor property.
        return bool(self.bProperty and not self.bEditorProperty)
    @property
    def bCallable_other(self):
        # Callable but not a builtin (bound methods, descriptors, etc.).
        return bool(self.bCallable and not self.bCallable_builtin)
    @property
    def display_name(self, bRichText=True):
        # NOTE(review): bRichText can never be passed through property access;
        # kept only for signature compatibility.
        if self.bProperty:
            return f"\t{self.name}"
        # callable
        if self.param_str:
            return f"\t{self.name}({self.param_str}) {self.return_type_str}"
        if self.bCallable_other:
            return f"\t{self.name}"  # e.g. __hash__, __class__, __eq__, etc.
        return f"\t{self.name}() {self.return_type_str}"
    @property
    def display_result(self) -> str:
        if self.bEditorProperty:
            return "{} {}".format(self.result, self.property_rw)
        return "{}".format(self.result)
    @property
    def bHasParamFunction(self):
        return self.param_str and len(self.param_str) != 0
def ll(obj):
    """Build a list of attr_detail records for every attribute of *obj*.

    Editor properties parsed from the class __doc__ are merged into the list.

    :param obj: any object; returns None for a falsy value or a module
    :return: list of attr_detail, or None
    """
    if not obj:
        return None
    if inspect.ismodule(obj):
        return None
    result = [attr_detail(obj, attr_name) for attr_name in dir(obj)]
    if hasattr(obj, '__doc__') and isinstance(obj, unreal.Object):
        editorPropertiesInfos = _getEditorProperties(obj.__doc__, obj)
        for name, type_, rws, descript in editorPropertiesInfos:
            # Reuse the record already built from dir(); otherwise create one
            # (editor-only properties may not appear in dir()).
            this_attr = next((v for v in result if v.name == name), None)
            if this_attr is None:
                this_attr = attr_detail(obj, name)
                result.append(this_attr)
            this_attr.apply_editor_property(obj, type_, rws, descript)
    for attr in result:
        attr.post(obj)
    return result
def _simplifyDoc(content):
def next_balanced(content, s="(", e = ")" ):
s_pos = -1
e_pos = -1
balance = 0
for index, c in enumerate(content):
match = c == s or c == e
if not match:
continue
balance += 1 if c == s else -1
if c == s and balance == 1 and s_pos == -1:
s_pos = index
if c == e and balance == 0 and s_pos != -1 and e_pos == -1:
e_pos = index
return s_pos, e_pos
return -1, -1
# bracketS, bracketE = content.find('('), content.find(')')
if not content:
return "", ""
bracketS, bracketE = next_balanced(content, s='(', e = ')')
arrow = content.find('->')
funcDocPos = len(content)
endSign = ['--', '\n', '\r']
for s in endSign:
p = content.find(s)
if p != -1 and p < funcDocPos:
funcDocPos = p
funcDoc = content[:funcDocPos]
if bracketS != -1 and bracketE != -1:
param = content[bracketS + 1: bracketE].strip()
else:
param = ""
return funcDoc, param
def _getEditorProperties(content, obj):
    """Parse the '**Editor Properties:**' section of an unreal class __doc__.

    Each property line looks like: - ``name`` (type): [Read-Write] description

    :param content: the class docstring
    :param obj: the owning object, used only in warning messages
    :return: list of (name, type_str, rws, descript) tuples
    """
    lines = content.split('\r')
    signFound = False
    allInfoFound = False
    result = []
    for line in lines:
        if not signFound and '**Editor Properties:**' in line:
            signFound = True
        if signFound:
            # todo: replace the manual scanning below with a regex
            # BUG FIX: the original compared `find(...) + offset` against -1,
            # which can never match for the start markers; check the raw
            # find() results before applying offsets.
            name_start = line.find('- ``')
            nameE = line.find('`` ')
            if name_start == -1 or nameE == -1:
                continue
            type_start = line.find('(')
            typeE = line.find(')')
            if type_start == -1 or typeE == -1:
                continue
            rw_start = line.find('[')
            rwE = line.find(']')
            if rw_start == -1 or rwE == -1:
                continue
            name = line[name_start + 4: nameE]
            type_str = line[type_start + 1: typeE]
            rws = line[rw_start + 1: rwE]
            descript = line[rwE + 2:]
            allInfoFound = True
            result.append((name, type_str, rws, descript))
    if signFound:
        if not allInfoFound:
            unreal.log_warning("not all info found {}".format(obj))
    else:
        unreal.log_warning("can't find editor properties in {}".format(obj))
    return result
def log_classes(obj):
    """Print obj's Python type, UClass and blueprint-generated class info."""
    print(obj)
    print("\ttype: {}".format(type(obj)))
    uclass = obj.get_class()
    print("\tget_class: {}".format(uclass))
    # A BlueprintGeneratedClass already is the generated class; otherwise
    # resolve it through PythonBPLib.
    if type(uclass) is unreal.BlueprintGeneratedClass:
        generatedClass = uclass
    else:
        generatedClass = unreal.PythonBPLib.get_blueprint_generated_class(obj)
    print("\tgeneratedClass: {}".format(generatedClass))
    print("\tbp_class_hierarchy_package: {}".format(unreal.PythonBPLib.get_bp_class_hierarchy_package(generatedClass)))
def is_selected_asset_type(types):
    """Return True if any asset selected in the Content Browser has a type in *types*.

    :param types: container of Python types to match with type(asset)
    """
    selectedAssets = Utilities.Utils.get_selected_assets()
    return any(type(asset) in types for asset in selectedAssets)
|
from typing import List, Dict, Set
import unreal
class ColumnHandler:
    """Tracks required and handled spreadsheet columns for a named table."""
    def __init__(self, table_name):
        self.table_name = table_name
        # columns that must be present in incoming data
        self.required_columns = []
        # columns some processor consumes (warn about the rest)
        self.handled_columns = set()
    def set_required_columns(self, columns):
        """Set the required columns; they are also marked as handled."""
        self.required_columns = columns
        self.handled_columns.update(columns)
    def add_handled_column(self, column: str):
        """Mark a single column as handled."""
        self.handled_columns.add(column)
    def add_handled_columns(self, columns: List[str]):
        """Mark several columns as handled.

        BUG FIX: this method was defined twice in the original class body;
        the duplicate definition has been removed.
        """
        self.handled_columns.update(columns)
    def process_columns(self, data):
        """Validate *data* (a list of dict rows) against the required columns.

        :param data: list of row dictionaries; the first row's keys stand in
            for the sheet's columns
        :return: {'success': bool, 'message': str}
        """
        if not data:
            return {'success': False, 'message': '๋ฐ์ดํฐ๊ฐ ๋น์ด์์ต๋๋ค.'}
        # Column names come from the first row.
        columns = data[0].keys() if data else []
        # Check that every required column is present.
        missing_columns = [col for col in self.required_columns if col not in columns]
        if missing_columns:
            message = f"{self.table_name} ์ํธ์์ ๋ค์ ํ์ ์ปฌ๋ผ์ด ๋๋ฝ๋์์ต๋๋ค:\n"
            message += "\n".join([f"- {col}" for col in missing_columns])
            return {'success': False, 'message': message}
        # Warn (stdout only) about columns no processor handles.
        unhandled_columns = [col for col in columns if col not in self.handled_columns]
        if unhandled_columns:
            print(f"๊ฒฝ๊ณ : {self.table_name} ์ํธ์์ ๋ค์ ์ปฌ๋ผ์ ์ฒ๋ฆฌ๋์ง ์์ต๋๋ค:")
            print("\n".join([f"- {col}" for col in unhandled_columns]))
        return {'success': True, 'message': ''}
|
import unreal
import math
import json
import pprint
import datetime
import os
import csv
import uuid
from enum import Enum
from typing import Any, List, Optional, Dict, TypeVar, Type, Callable, cast
def load_csv(file_path):
    """Parse an LDtk collision CSV into a 2D grid of ints.

    Empty cells become 0; every other cell is parsed with int().

    :param file_path: path to the CSV file on disk
    :return: list of rows, each a list of ints
    """
    # newline='' is the csv-module recommended way to open input files.
    with open(file_path, 'r', newline='') as file:
        reader = csv.reader(file)
        return [[int(cell) if cell.strip() else 0 for cell in row] for row in reader]
def create_collision(actor: unreal.PaperSpriteActor, x, y, tile_size):
    """Attach a BoxComponent collision tile to *actor* at grid position (x, y)."""
    # Child count before the add lets us locate the freshly added component below.
    initial_children_count = actor.root_component.get_num_children_components()
    subsystem = unreal.get_engine_subsystem(unreal.SubobjectDataSubsystem)
    root_data_handle = subsystem.k2_gather_subobject_data_for_instance(actor)[0]
    # Instantiated only to obtain the class passed to add_new_subobject.
    collision_component = unreal.BoxComponent()
    sub_handle, _ = subsystem.add_new_subobject(params=unreal.AddNewSubobjectParams(parent_handle=root_data_handle, new_class=collision_component.get_class()))
    # A uuid suffix keeps each collision tile's subobject name unique.
    subsystem.rename_subobject(handle=sub_handle, new_name=unreal.Text(f"LDTK_Collision_{uuid.uuid4()}"))
    new_component: unreal.BoxComponent = actor.root_component.get_child_component(initial_children_count)
    new_component.set_box_extent(unreal.Vector(tile_size / 2, tile_size / 2, 64))
    # NOTE(review): the Y=-32 offset and 90-degree pitch appear to align the box
    # with the sprite plane (LDtk Y maps to -Z elsewhere in this file) -- confirm.
    new_component.set_relative_location_and_rotation(unreal.Vector((x + (tile_size / 2)), -32, -(y + (tile_size / 2))), unreal.Rotator(90, 0, 0),False, False)
    new_component.set_collision_profile_name("BlockAll")
def spawn_collisions_from_grid(grid, actor: unreal.PaperSpriteActor, composite_width, composite_height):
    """Create a collision box on *actor* for every grid cell equal to 1.

    Coordinates are centered on the composite by subtracting half its size.
    """
    tile_size = 16
    half_width = composite_width / 2
    half_height = composite_height / 2
    for row_index, row in enumerate(grid):
        for col_index, cell in enumerate(row):
            if cell != 1:
                continue
            world_x = col_index * tile_size - half_width
            world_y = row_index * tile_size - half_height
            create_collision(actor, world_x, world_y, tile_size)
def find_all_subfolders(path):
    """Return every subdirectory under *path* (recursive) as a full path.

    :param path: root directory to walk
    :return: list of directory paths
    """
    subfolders = []
    # BUG FIX: the original loop variable shadowed the builtin `dir`.
    for root, dirs, _files in os.walk(path):
        subfolders.extend(os.path.join(root, d) for d in dirs)
    return subfolders
# Maps a normalized directory path to {filename: None} for recognised files.
DirectoryContents = Dict[str, Dict[str, Any]]

# Files emitted by LDtk's "super simple export" that the importer cares about.
_LDTK_EXPORT_SUFFIXES = ('_bg.png', '_composite.png', 'Bg_textures.png', 'Collisions.csv', 'Collisions.png', 'Collisions-int.png', 'data.json', 'Wall_shadows.png')

def get_directory_contents(path: str) -> DirectoryContents:
    """Walk *path* and collect recognised LDtk export files per directory.

    :param path: root directory to scan
    :return: {normalized dir path: {filename: None}} for dirs with matches
        (the DirectoryContents alias, previously unused, now documents this)
    """
    directory_contents: DirectoryContents = {}
    for root, _, files in os.walk(path):
        root = os.path.normpath(root)
        matched = [file for file in files if file.endswith(_LDTK_EXPORT_SUFFIXES)]
        if matched:
            directory_contents[root] = {file: None for file in matched}
    return directory_contents
def importWorld(folder_name: str):
    """Import an LDtk "super simple export" level set into the current map.

    For each level directory this spawns the composite sprite, its entities,
    and box collisions read from Collisions.csv.

    :param folder_name: name of the level folder under /Game/LdtkFiles
    """
    base_directory = "/Game"
    ldtk_files_directory = "LdtkFiles"
    ldtk_simplified_directory = "simplified"
    composite_filename = "_composite"
    data_filename = "data.json"
    collisions_filename = "Collisions.csv"
    if len(str(folder_name)) == 0:
        print("Unreal LDtk: No folder name provided. Exiting...")
        return
    folder_name = str(folder_name)
    # Asset path (for textures) vs on-disk path (for json/csv files).
    base_path = os.path.join(base_directory, ldtk_files_directory, folder_name, ldtk_simplified_directory)
    content_directory = unreal.Paths.project_content_dir()
    level_directory = os.path.join(content_directory, ldtk_files_directory, folder_name, ldtk_simplified_directory).replace("\\", "/")
    directories = find_all_subfolders(level_directory)
    if len(directories) > 0:
        print(f"Unreal LDtk: Found {len(directories)} directories in {level_directory}. Beginning import...")
    else:
        print(f"Unreal LDtk: No directories found in {level_directory}. \nThis might be because you are missing the LdtkFiles directory, or that the folder level name is wrong. Exiting...")
        return
    entity_index_counter = 0
    for directory in directories:
        _, directory_name = os.path.split(directory)
        full_path_composite = os.path.join(base_path, directory_name, composite_filename)
        full_path_data = os.path.join(level_directory, directory_name, data_filename).replace("\\", "/")
        full_path_collisions = os.path.join(level_directory, directory_name, collisions_filename).replace("\\", "/")
        composite_exists = unreal.EditorAssetLibrary.does_asset_exist(full_path_composite)
        data_exists = os.path.exists(full_path_data)
        collisions_exists = os.path.exists(full_path_collisions)
        ## Creating Sprite ##
        if composite_exists:
            composite_texture = load_texture_asset(full_path_composite)
            composite_sprite = create_sprite_from_texture(composite_texture, directory_name)
        else:
            print(f"Unreal LDtk: Missing composite texture asset, skipping...")
        ## Reading JSON file ##
        if data_exists:
            # `with` guarantees the handle is closed even if json.load raises.
            with open(full_path_data) as data_file:
                data = json.load(data_file)
            composite_spawn_coords = (data['x'] + (data['width'] / 2), data['y'] + (data['height'] / 2), 0)
        else:
            print(f"Unreal LDtk: Missing data.json file, skipping...")
        if (composite_exists and data_exists):
            spawned_composite_actor = spawn_sprite_in_world(composite_sprite, (composite_spawn_coords))
            ## Spawning Entities ##
            for _, entities in data['entities'].items():
                for entity in entities:
                    spawn_entity_in_world(f"LDtk_{entity['id']}_{entity_index_counter}", data['x'] + entity['x'], data['y'] + entity['y'])
                    entity_index_counter += 1
        else:
            print(f"Unreal LDtk: Missing composite and/or data.json file, skipping entities...")
        ## Spawning Collisions ##
        # BUG FIX: also require data_exists -- the original only checked the
        # composite and CSV, and hit a NameError on `spawned_composite_actor`
        # and `data` when data.json was missing.
        if composite_exists and data_exists and collisions_exists:
            grid = load_csv(full_path_collisions)
            spawn_collisions_from_grid(grid, spawned_composite_actor, data['width'], data['height'])
        else:
            print(f"Unreal LDtk: Missing Composite and/or Collisions.csv file, skipping collisions...")
def check_and_delete_existing_sprite(sprite_name):
    """Destroy the level actor labelled *sprite_name* and delete its asset."""
    asset_path = "/project/" + sprite_name
    for level_actor in unreal.EditorLevelLibrary.get_all_level_actors():
        if level_actor.get_actor_label() != sprite_name:
            continue
        unreal.EditorLevelLibrary.destroy_actor(level_actor)
        print(f"Deleting existing composite sprite: {level_actor}")
        break
    if unreal.EditorAssetLibrary.does_asset_exist(asset_path):
        unreal.EditorAssetLibrary.delete_asset(asset_path)
def check_and_delete_existing_entity(entity_name):
    """Destroy the first level actor whose label matches *entity_name*, if any."""
    level_actors = unreal.EditorLevelLibrary.get_all_level_actors()
    match = next((a for a in level_actors if a.get_actor_label() == entity_name), None)
    if match is not None:
        unreal.EditorLevelLibrary.destroy_actor(match)
        print(f"Deleting existing entity: {match}")
def load_texture_asset(texture_path):
    """Load and return the asset at *texture_path* (None when it is missing)."""
    return unreal.EditorAssetLibrary.load_asset(texture_path)
def create_sprite_from_texture(texture_asset, world_name):
    """Create (or recreate) a PaperSprite asset from *texture_asset*.

    :param texture_asset: source texture asset (the original annotated this as
        unreal.PaperSprite but it is used as a texture; annotation dropped)
    :param world_name: level/world name used to build a unique sprite name
    :return: the created PaperSprite asset, or None on failure
    """
    try:
        sprite_path = "/project/"
        sprite_name = f"LDtk_{world_name}_{texture_asset.get_name()}_sprite"
        check_and_delete_existing_sprite(sprite_name=sprite_name)
        sprite_package = unreal.AssetToolsHelpers.get_asset_tools().create_asset(asset_name=sprite_name, package_path=sprite_path, asset_class=unreal.PaperSprite, factory=unreal.PaperSpriteFactory())
        sprite_package.set_editor_property("source_texture", texture_asset)
        print("Sprite saved at: ", sprite_path)
        return sprite_package
    except Exception as err:
        # BUG FIX: the original bare `except: pass` silently swallowed every
        # error (including KeyboardInterrupt) and returned None with no trace.
        unreal.log_error(f"Unreal LDtk: Failed to create sprite for '{world_name}': {err}")
        return None
def spawn_entity_in_world(name, x, y):
    """Spawn a plain Actor labelled *name*; LDtk Y maps to negative Z."""
    check_and_delete_existing_entity(name)
    location = unreal.Vector(x, 1, -y)
    actor_class = unreal.Actor().get_class()
    actor: unreal.Actor = unreal.EditorLevelLibrary.spawn_actor_from_class(actor_class, location)
    if actor:
        actor.set_actor_label(name)
        print(f"Spawning entity: {actor.get_actor_label()}")
        return actor
def spawn_sprite_in_world(sprite, location=(0, 0, 0), scale=(1, 1, 1)):
    """Spawn *sprite* as an actor; LDtk (x, y, z) maps to Unreal (x, z, -y).

    Returns the spawned actor, or None when spawning failed.
    """
    loc_x, loc_y, loc_z = location
    spawn_location = unreal.Vector(loc_x, loc_z, -loc_y)
    scale_vector = unreal.Vector(scale[0], scale[1], scale[2])
    actor_transform = unreal.Transform(spawn_location, unreal.Rotator(0, 0, 0), scale_vector)
    actor = unreal.EditorLevelLibrary.spawn_actor_from_object(sprite, spawn_location)
    if not actor:
        return None
    sprite_component = actor.render_component
    if sprite_component:
        sprite_component.set_sprite(sprite)
    actor.set_actor_scale3d(scale_vector)
    actor.set_actor_transform(actor_transform, False, True)
    print(f"Spawning composite sprite: {actor.get_actor_label()}")
    return actor
#noinspection PyUnresolvedReferences
# NOTE(review): `folder_name` is not defined anywhere in this file -- it is
# presumably injected into the execution scope by the tool that runs this
# script (e.g. an Unreal "Execute Python Script" invocation); confirm.
importWorld(folder_name)
#noinspection PyUnresolvedReferences
print(datetime.datetime.now())
|
import unreal
# Sorts level actors into outliner folders by category.
# instances of unreal classes
editor_level_lib = unreal.EditorLevelLibrary()
editor_filter_lib = unreal.EditorFilterLibrary()
# get all actors and then filter them by class or name
actors = editor_level_lib.get_all_level_actors()
static_meshes = editor_filter_lib.by_class(actors, unreal.StaticMeshActor)
blueprints = editor_filter_lib.by_id_name(actors, "BP_")
# extra classes could be added here
count = 0
# create a mapping between folder names and actors
mapping = {
    "StaticMeshes": static_meshes,
    "Blueprints": blueprints,
    # extra items could be added here
}
for folder, folder_actors in mapping.items():
    # Loop through each actor and place them in folders
    for actor in folder_actors:
        actor_name = actor.get_fname()
        actor.set_folder_path(folder)
        unreal.log("Moved {} into {}".format(actor_name, folder))
        count += 1
# BUG FIX: summary message previously read "Moved {} actors moved"
unreal.log("Moved {} actors".format(count))
|
# Copyright Epic Games, Inc. All Rights Reserved.
import unreal
# This example is an implementation of an "executor" which is responsible for
# deciding how a queue is rendered, giving you complete control over the before,
# during, and after of each render.
#
# This class is an example of how to make an executor which processes a job in a standalone executable launched with "-game".
# You can follow this example to either do a simple integration (read arguments from the command line as suggested here),
# or it can used to implement an advanced plugin which opens a socket or makes REST requests to a server to find out what
# work it should do, such as for a render farm implementation.
#
# We're building a UClass implementation in Python. This allows the Python to
# integrate into the system in a much more intuitive way but comes with some
# restrictions:
# Python classes cannot be serialized. This is okay for executors because they are
# created for each run and are not saved into assets, but means that you cannot
# implement a settings class as those do need to get saved into preset assets.
# All class properties must be UProperties. This means you cannot use native
# Python sockets.
#
# This class must inherit from unreal.MoviePipelinePythonHostExecutor. This class also
# provides some basic socket and async http request functionality as a workaround for no
# native Python member variables.
#
# If you are trying to write your own executor based on this example, you will need to create
# a /project/ folder and place the custom python file within that folder. Then you will need
# to create an "init_unreal.py" file within that folder and add "import <YourPyModuleName>" to it
# so that Unreal will attempt to parse the class on engine load and it can be spawned by MRQ.
# If your Python file is named MyExecutorWithExtraFeatures.py, then you would add
#
# import MyExecutorWithExtraFeatures
#
# to your init_unreal.py file.
#
# REQUIREMENTS:
# Requires the "Python Editor Script Plugin" to be enabled in your project.
#
# USAGE:
# Use the following command line argument to launch this:
# UnrealEditor-Cmd.exe <path_to_uproject> <map_name> -game -MoviePipelineLocalExecutorClass=/project/.MoviePipelinePythonHostExecutor -ExecutorPythonClass=/project/.MoviePipelineExampleRuntimeExecutor -LevelSequence=<path_to_level_sequence> -windowed -resx=1280 -resy=720 -log
# ie:
# UnrealEditor-Cmd.exe "/project/.uproject" subwaySequencer_P -game -MoviePipelineLocalExecutorClass=/project/.MoviePipelinePythonHostExecutor -ExecutorPythonClass=/project/.MoviePipelineExampleRuntimeExecutor -LevelSequence="/project/.SubwaySequencerMASTER" -windowed -resx=1280 -resy=720 -log
#
# If you are looking for how to render in-editor using Python, see the MoviePipelineEditorExample.py script instead.
@unreal.uclass()
class MoviePipelineExampleRuntimeExecutor(unreal.MoviePipelinePythonHostExecutor):
    """Example "-game" mode Movie Render Queue executor implemented in Python.

    Builds a one-job queue from the -LevelSequence command line argument,
    renders it with a MoviePipeline, and logs progress every frame.
    """
    # Declare the properties of the class here. You can use basic
    # Python types (int, str, bool) as well as unreal properties.
    # You can use Arrays and Maps (Dictionaries) as well
    activeMoviePipeline = unreal.uproperty(unreal.MoviePipeline)
    exampleArray = unreal.Array(str) # An array of strings
    exampleDict = unreal.Map(str, bool) # A dictionary of strings to bools.
    # Constructor that gets called when created either via C++ or Python
    # Note that this is different than the standard __init__ function of Python
    def _post_init(self):
        """Initialize default property values and register delegate listeners."""
        # Assign default values to properties in the constructor
        self.activeMoviePipeline = None
        self.exampleArray.append("Example String")
        self.exampleDict["ExampleKey"] = True
        # Register a listener for socket messages and http messages (unused in this example
        # but shown here as an example)
        self.socket_message_recieved_delegate.add_function_unique(self, "on_socket_message");
        self.http_response_recieved_delegate.add_function_unique(self, "on_http_response_recieved")
    # We can override specific UFunctions declared on the base class with
    # this markup.
    @unreal.ufunction(override=True)
    def execute_delayed(self, inPipelineQueue):
        """Build a single-job queue from -LevelSequence and start the render."""
        # This function is called once the map has finished loading and the
        # executor is instantiated. If you needed to retrieve data from some
        # other system via a REST api or Socket connection you could do that here.
        for x in range(0, 25):
            unreal.log_error("This script is meant as an example to build your own and is not meant to be run on its own. Please read the source code (/project/.py) for details on how to make your own. This example automatically overrides various settings on jobs to serve as an example for how to do that in your own script if needed.")
        # If your executor needs to make async HTTP calls (such as fetching the value from a third party management library)
        # you can do that with the following code:
        # newIndex is a unique index for each send_http_request call that you can store
        # and retrieve in the on_http_response_recieved function to match the return back
        # up to the original intent.
        # newIndex = self.send_http_request("https://google.com", "GET", "", unreal.Map(str, str))
        # If your executor wants to make TCP socket connections to send data back and forth with a third party software,
        # you can do that with the following code:
        # Here's an example of how to open a TCP socket connection. This example
        # doesn't need one, but is shown here in the event you wish to build a more
        # in depth integration into render farm software.
        #socketConnected = self.connect_socket("127.0.0.1", 6783)
        #if socketConnected == True:
        #    # Send back a polite hello world message! It will be sent over the socket
        #    # with a 4 byte size prefix so you know how many bytes to expect before
        #    # the message is complete.
        #    self.send_socket_message("Hello World!")
        #else:
        #    unreal.log_warning("This is an example warning for when a socket fails to connect.")
        # Here's how we can scan the command line for any additional args such as the path to a level sequence.
        (cmdTokens, cmdSwitches, cmdParameters) = unreal.SystemLibrary.parse_command_line(unreal.SystemLibrary.get_command_line())
        levelSequencePath = None
        try:
            levelSequencePath = cmdParameters['LevelSequence']
        except:
            unreal.log_error("Missing '-LevelSequence=/project/.MySequence' argument")
            self.on_executor_errored()
            return
        # A movie pipeline needs to be initialized with a job, and a job
        # should be owned by a Queue so we will construct a queue, make one job
        # and then configure the settings on the job. If you want inPipelineQueue to be
        # valid, then you must pass a path to a queue asset via -MoviePipelineConfig. Here
        # we just make one from scratch with the one level sequence as we didn't implement
        # multi-job handling.
        self.pipelineQueue = unreal.new_object(unreal.MoviePipelineQueue, outer=self);
        unreal.log("Building Queue...")
        # Allocate a job. Jobs hold which sequence to render and what settings to render with.
        newJob = self.pipelineQueue.allocate_new_job(unreal.MoviePipelineExecutorJob)
        newJob.sequence = unreal.SoftObjectPath(levelSequencePath)
        # Now we can configure the job. Calling find_or_add_setting_by_class is how you add new settings.
        outputSetting = newJob.get_configuration().find_or_add_setting_by_class(unreal.MoviePipelineOutputSetting)
        outputSetting.output_resolution = unreal.IntPoint(1280, 720)
        outputSetting.file_name_format = "{sequence_name}.{frame_number}"
        # Ensure there is something to render
        newJob.get_configuration().find_or_add_setting_by_class(unreal.MoviePipelineDeferredPassBase)
        # Ensure there's a file output.
        newJob.get_configuration().find_or_add_setting_by_class(unreal.MoviePipelineImageSequenceOutput_PNG)
        # This is important. There are several settings that need to be
        # initialized (just so their default values kick in) and instead
        # of requiring you to add every one individually, you can initialize
        # them all in one go at the end.
        newJob.get_configuration().initialize_transient_settings()
        # Now that we've set up the minimum requirements on the job we can created
        # a movie render pipeline to run our job. Construct the new object
        self.activeMoviePipeline = unreal.new_object(self.target_pipeline_class, outer=self.get_last_loaded_world(), base_type=unreal.MoviePipeline);
        # Register to any callbacks we want
        self.activeMoviePipeline.on_movie_pipeline_work_finished_delegate.add_function_unique(self, "on_movie_pipeline_finished")
        # And finally tell it to start working. It will continue working
        # and then call the on_movie_pipeline_finished_delegate function at the end.
        self.activeMoviePipeline.initialize(newJob)
    # This function is called every frame and can be used to do simple countdowns, checks
    # for more work, etc. Can be entirely omitted if you don't need it.
    @unreal.ufunction(override=True)
    def on_begin_frame(self):
        """Log render progress each frame while a pipeline is active."""
        # It is important that we call the super so that async socket messages get processed.
        super(MoviePipelineExampleRuntimeExecutor, self).on_begin_frame()
        if self.activeMoviePipeline:
            unreal.log("Progress: %f" % unreal.MoviePipelineLibrary.get_completion_percentage(self.activeMoviePipeline))
    # This is NOT called for the very first map load (as that is done before Execute is called).
    # This means you can assume this is the resulting callback for the last open_level call.
    @unreal.ufunction(override=True)
    def on_map_load(self, inWorld):
        """Callback after any open_level finishes; unused in this example."""
        # We don't do anything here, but if you were processing a queue and needed to load a map
        # to render a job, you could call:
        #
        # unreal.GameplayStatics.open_level(self.get_last_loaded_world(), mapPackagePath, True, gameOverrideClassPath)
        #
        # And then know you can continue execution once this function is called. The Executor
        # lives outside of a map so it can persist state across map loads.
        # Don't call open_level from this function as it will lead to an infinite loop.
        pass
    # This needs to be overriden. Doens't have any meaning in runtime executors, only
    # controls whether or not the Render (Local) / Render (Remote) buttons are locked
    # in Editor executors.
    @unreal.ufunction(override=True)
    def is_rendering(self):
        """Required override; meaningless for runtime executors."""
        return False
    # This declares a new UFunction and specifies the return type and the parameter types
    # callbacks for delegates need to be marked as UFunctions.
    @unreal.ufunction(ret=None, params=[unreal.MoviePipelineOutputData])
    def on_movie_pipeline_finished(self, results):
        """Delegate callback fired when the single render job completes."""
        # We're not processing a whole queue, only a single job so we can
        # just assume we've reached the end. If your queue had more than
        # one job, now would be the time to increment the index of which
        # job you are working on, and start the next one (instead of calling
        # on_executor_finished_impl which should be the end of the whole queue)
        unreal.log("Finished rendering movie! Success: " + str(results.success))
        self.activeMoviePipeline = None
        self.on_executor_finished_impl()
    @unreal.ufunction(ret=None, params=[str])
    def on_socket_message(self, message):
        """Delegate callback for incoming socket messages (unused here)."""
        # Message is a UTF8 encoded string. The system expects
        # messages to be sent over a socket with a uint32 to describe
        # the message size (not including the size bytes) so
        # if you wanted to send "Hello" you would send
        # uint32 - 5
        # uint8 - 'H'
        # uint8 - 'e'
        # etc.
        # Socket messages sent from the Executor will also be prefixed with a size.
        pass
    @unreal.ufunction(ret=None, params=[int, int, str])
    def on_http_response_recieved(self, inRequestIndex, inResponseCode, inMessage):
        """Delegate callback for completed async HTTP requests (unused here)."""
        # This is called when an http response is returned from a request.
        # the request index will match the value returned when you made the original
        # call, so you can determine the original intent this response is for.
        pass
|
import unreal
def spawn_coverage_actor():
    """Ensure exactly one CoverageActor blueprint instance exists in the level.

    Returns True when the actor already exists or was spawned, False on error.
    """
    # Path to the CoverageActor blueprint class
    blueprint_path = '/project/.CoverageActor_C'
    # Load the blueprint class
    blueprint_class = unreal.load_asset(blueprint_path)
    if not blueprint_class:
        unreal.log_error(f"Blueprint class '{blueprint_path}' non trovata.")
        return False
    # Fetch the editor world (kept from the original; not used below)
    editor_subsystem = unreal.get_editor_subsystem(unreal.UnrealEditorSubsystem)
    world = editor_subsystem.get_editor_world()
    # Skip spawning when an instance of this class is already in the scene
    for existing_actor in unreal.EditorLevelLibrary.get_all_level_actors():
        if existing_actor.get_class() == blueprint_class:
            unreal.log("Coverage Actor esiste giร nella scena.")
            return True
    # Spawn at the origin with no rotation
    actor_location = unreal.Vector(0.0, 0.0, 0.0)
    actor_rotation = unreal.Rotator(0.0, 0.0, 0.0)
    actor = unreal.EditorLevelLibrary.spawn_actor_from_class(blueprint_class, actor_location, actor_rotation)
    if not actor:
        unreal.log_error("Attore non รจ stato creato.")
        return False
    unreal.log("Coverage Actor creato con successo.")
    return True
# Run the function to place the actor in the scene.
spawn_coverage_actor()
|
import unreal
# Create a fresh level, make it current, then save it.
# (BUG FIX: removed the unused `levelTools`, `editorLevelLibrary` and
# `myNewLevel` bindings from the original script.)
levelSubSys = unreal.get_editor_subsystem(unreal.LevelEditorSubsystem)
# init new level name
newLevel = "myNewLevel"
# create new level
levelSubSys.new_level("/project/")
# set level as current level
levelSubSys.set_current_level_by_name(newLevel)
# save level
levelSubSys.save_current_level()
|
# _
# (_)
# _ __ ___ __ _ _ __ ___ ___ _ __ _ ___ _ __ ___
# | '_ ` _ \ / _` | '_ ` _ \ / _ \| '_ \| |/ _ \ '_ ` _ \
# | | | | | | (_| | | | | | | (_) | | | | | __/ | | | | |
# |_| |_| |_|\__,_|_| |_| |_|\___/|_| |_|_|\___|_| |_| |_|
# www.mamoniem.com
# www.ue4u.xyz
#Copyright 2022 Muhammad A.Moniem (@_mamoniem). All Rights Reserved.
#
import unreal
workingPath = "/Game/"

@unreal.uclass()
class GetEditorAssetLibrary(unreal.EditorAssetLibrary):
    """Thin uclass wrapper so EditorAssetLibrary can be instantiated."""
    pass

editorAssetLib = GetEditorAssetLibrary()

# List every asset under workingPath (recursive, folders excluded) and
# move each one into /Game/<ClassName>/<AssetName>.<AssetName>.
allAssets = editorAssetLib.list_assets(workingPath, True, False)
allAssetsCount = len(allAssets)
selectedAssetPath = workingPath
with unreal.ScopedSlowTask(allAssetsCount, selectedAssetPath) as slowTask:
    slowTask.make_dialog(True)
    for asset in allAssets:
        asset_obj = editorAssetLib.find_asset_data(asset).get_asset()
        asset_name = asset_obj.get_name()
        asset_path_name = asset_obj.get_path_name()
        asset_class_name = asset_obj.get_class().get_name()
        target_path_name = "/Game/%s%s%s%s%s" % (asset_class_name, "/", asset_name, ".", asset_name)
        editorAssetLib.rename_asset(asset_path_name, target_path_name)
        if slowTask.should_cancel():
            break
        slowTask.enter_progress_frame(1, asset)
|
# Copyright Epic Games, Inc. All Rights Reserved
"""
General Deadline utility functions
"""
# Built-in
from copy import deepcopy
import json
import re
import unreal
def format_job_info_json_string(json_string, exclude_aux_files=False):
    """
    Deadline Data asset returns a json string, load the string and format the job info in a dictionary
    :param str json_string: Json string from deadline preset struct
    :param bool exclude_aux_files: Excludes the aux files from the returned job info dictionary if True
    :return: job Info dictionary
    :raises RuntimeError: If the input is empty or not valid json.
    """
    if not json_string:
        raise RuntimeError(f"Expected json string value but got `{json_string}`")
    job_info = {}
    try:
        intermediate_info = json.loads(json_string)
    except Exception as err:
        raise RuntimeError(f"An error occurred formatting the Job Info string. \n\t{err}")
    project_settings = unreal.get_default_object(unreal.DeadlineServiceEditorSettings)
    script_category_mappings = project_settings.script_category_mappings
    # The json string keys are camelCased keys which are not the expected input
    # types for Deadline. Format the keys to PascalCase.
    for key, value in intermediate_info.items():
        # Remove empty values
        if not value:
            continue
        # Deadline does not support native boolean so make it a string
        if isinstance(value, bool):
            value = str(value).lower()
        # Uppercase the first non-space character (raw string so \S is a regex class).
        pascal_case_key = re.sub(r"(^\S)", lambda string: string.group(1).upper(), key)
        if pascal_case_key == "AuxFiles":
            # Bug fix: previously, when exclude_aux_files was True, this key
            # fell through to the generic else-branch and the raw list of
            # dicts was stored anyway. Aux files must be skipped entirely.
            if exclude_aux_files:
                continue
            # The returned json string lists AuxFiles as a list of
            # dictionaries but the expected value is a list of
            # strings. Reformat this input into the expected value.
            aux_files = []
            for files in value:
                aux_files.append(files["filePath"])
            job_info[pascal_case_key] = aux_files
            continue
        # Extra options that can be set on the job info are packed inside an
        # ExtraJobOptions key in the json string.
        # Extract them and add each as a flat setting in the job info.
        elif pascal_case_key == "ExtraJobOptions":
            job_info.update(value)
            continue
        # Resolve the job script paths to be sent to the farm.
        elif pascal_case_key in ["PreJobScript", "PostJobScript", "PreTaskScript", "PostTaskScript"]:
            # The path mappings in the project settings are a dictionary
            # type with the script category as a named path for specifying
            # the root directory of a particular script. The User interface
            # exposes the category which is what's in the json string. We
            # will use this category to look up the actual path mappings in
            # the project settings.
            script_category = intermediate_info[key]["scriptCategory"]
            script_name = intermediate_info[key]["scriptName"]
            if script_category and script_name:
                job_info[pascal_case_key] = f"{script_category_mappings[script_category]}/{script_name}"
            continue
        # Environment variables for Deadline are numbered key value pairs in
        # the form EnvironmentKeyValue#.
        # Conform the Env settings to the expected Deadline configuration.
        elif (pascal_case_key == "EnvironmentKeyValue") and value:
            for index, (env_key, env_value) in enumerate(value.items()):
                job_info[f"EnvironmentKeyValue{index}"] = f"{env_key}={env_value}"
            continue
        # ExtraInfoKeyValue for Deadline are numbered key value pairs in the
        # form ExtraInfoKeyValue#.
        # Conform the setting to the expected Deadline configuration.
        elif (pascal_case_key == "ExtraInfoKeyValue") and value:
            for index, (env_key, env_value) in enumerate(value.items()):
                job_info[f"ExtraInfoKeyValue{index}"] = f"{env_key}={env_value}"
            continue
        else:
            # Pass every remaining setting through unchanged.
            job_info[pascal_case_key] = value
    # Remove our custom representation of Environment and ExtraInfo key value
    # pairs from the dictionary as the expectation is that these have been
    # conformed to deadline's expected key value representation.
    for key in ["EnvironmentKeyValue", "ExtraInfoKeyValue"]:
        job_info.pop(key, None)
    return job_info
def format_plugin_info_json_string(json_string):
    """
    Deadline Data asset returns a json string, load the string and format the plugin info in a dictionary
    :param str json_string: Json string from deadline preset struct
    :return: job Info dictionary
    :raises RuntimeError: If the input is empty, not valid json, or lacks a pluginInfo key.
    """
    if not json_string:
        raise RuntimeError(f"Expected json string value but got `{json_string}`")
    # The plugin info lives under the camelCased `pluginInfo` key produced by
    # struct-to-json conversion.
    try:
        return json.loads(json_string)["pluginInfo"]
    except Exception as err:
        raise RuntimeError(f"An error occurred formatting the Plugin Info string. \n\t{err}")
def get_deadline_info_from_preset(job_preset=None, job_preset_struct=None):
    """
    This method returns the job info and plugin info from a deadline preset
    :param unreal.DeadlineJobPreset job_preset: Deadline preset asset
    :param unreal.DeadlineJobPresetStruct job_preset_struct: The job info and plugin info in the job preset
    :return: Returns a tuple with the job info and plugin info dictionary
    :rtype: Tuple
    """
    job_info, plugin_info = {}, {}
    # TODO: Make sure the preset library is a loaded asset
    # An explicitly supplied struct takes precedence over the one embedded
    # in the preset asset.
    if job_preset_struct is not None:
        preset_struct = job_preset_struct
    elif job_preset is not None:
        preset_struct = job_preset.job_preset_struct
    else:
        preset_struct = None
    if preset_struct:
        # Get the Job Info and Plugin Info; fail the submission on any error.
        try:
            job_info = dict(unreal.DeadlineServiceEditorHelpers.get_deadline_job_info(preset_struct))
            plugin_info = dict(unreal.DeadlineServiceEditorHelpers.get_deadline_plugin_info(preset_struct))
        except Exception as err:
            unreal.log_error(
                f"Error occurred getting deadline job and plugin details. \n\tError: {err}"
            )
            raise
    return job_info, plugin_info
def merge_dictionaries(first_dictionary, second_dictionary):
    """
    Recursively merge two dictionaries into a brand-new dictionary.
    Values from the second dictionary win; nested dictionaries are merged
    rather than replaced. Neither input is mutated.
    :param dict first_dictionary: The base dictionary
    :param dict second_dictionary: The dictionary merged on top
    :return: A new dictionary based on a merger of the input dictionaries
    :rtype: dict
    """
    # Deep copy so the caller's input is never mutated.
    merged = deepcopy(first_dictionary)
    for key, value in second_dictionary.items():
        if isinstance(value, dict):
            # Recurse into nested dictionaries, creating the slot if absent.
            merged[key] = merge_dictionaries(merged.get(key, {}), value)
        else:
            merged[key] = value
    return merged
def get_editor_deadline_globals():
    """
    Get global storage that will persist for the duration of the
    current interpreter/process.
    .. tip::
        Please namespace or otherwise ensure unique naming of any data stored
        into this dictionary, as key clashes are not handled/safety checked.
    :return: Global storage
    :rtype: dict
    """
    import __main__
    # Lazily create the storage dict on the interpreter's __main__ module so
    # every caller in this process shares the same object.
    if not hasattr(__main__, "__editor_deadline_globals__"):
        __main__.__editor_deadline_globals__ = {}
    return __main__.__editor_deadline_globals__
|
from typing import Dict, Any
import unreal
import json
def get_map_info() -> Dict[str, Any]:
    """Collect a summary report of the currently loaded editor map.

    Returns a dict with map name/path, total actor count, the 15 most common
    actor classes, lighting counts, and streaming-level info. Returns
    {"error": ...} when no world is loaded.
    """
    world = unreal.get_editor_subsystem(unreal.UnrealEditorSubsystem).get_editor_world()
    if not world:
        return {"error": "No world loaded"}
    map_info: Dict[str, Any] = {
        "map_name": world.get_name(),
        "map_path": world.get_path_name(),
    }
    all_actors = unreal.get_editor_subsystem(
        unreal.EditorActorSubsystem
    ).get_all_level_actors()
    map_info["total_actors"] = len(all_actors)
    # Build the class histogram in a single pass; the original scanned
    # all_actors four more times for the lighting numbers below, which all
    # fall out of this histogram for free.
    actor_types: Dict[str, int] = {}
    for actor in all_actors:
        actor_class = actor.get_class().get_name()
        actor_types[actor_class] = actor_types.get(actor_class, 0) + 1
    # Top 15 classes by count, descending.
    map_info["actor_types"] = dict(
        sorted(actor_types.items(), key=lambda x: x[1], reverse=True)[:15]
    )
    map_info["lighting"] = {
        "has_lightmass_importance_volume": actor_types.get("LightmassImportanceVolume", 0) > 0,
        "directional_lights": actor_types.get("DirectionalLight", 0),
        "point_lights": actor_types.get("PointLight", 0),
        "spot_lights": actor_types.get("SpotLight", 0),
    }
    # Streaming levels: best-effort, since the API may be unavailable.
    try:
        streaming_levels = unreal.EditorLevelLibrary.get_all_level_actors_of_class(
            unreal.LevelStreamingDynamic
        )
        map_info["streaming_levels"] = len(streaming_levels)
        map_info["streaming_level_names"] = [
            level.get_name() for level in streaming_levels
        ]
    except Exception:
        map_info["streaming_levels"] = 0
        map_info["streaming_level_names"] = []
    return map_info
def main():
    """Entry point: gather the current map report and pretty-print it as JSON."""
    report = get_map_info()
    print(json.dumps(report, indent=2))
if __name__ == "__main__":
    main()
|
# coding: utf-8
# Rewire an open Control Rig blueprint so each morph target of its preview
# mesh gets a float control under a 'MorphControlRoot_s' space.
import unreal
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-vrm")           # VRM asset path (unused below)
parser.add_argument("-rig")           # substring of the target rig's path
parser.add_argument("-debugeachsave") # '1' -> save the rig after each control
args = parser.parse_args()
#print(args.vrm)
######
# Pick the open rig blueprint whose path contains args.rig.
rigs = unreal.ControlRigBlueprint.get_currently_open_rig_blueprints()
print(rigs)
for r in rigs:
    s:str = r.get_path_name()
    ss:str = args.rig
    if (s.find(ss) < 0):
        print("no rig")
    else:
        rig = r
# NOTE(review): if no open rig matches, `rig` is unbound and the next line
# raises NameError — confirm a match is always guaranteed by the caller.
h_mod = rig.get_hierarchy_modifier()
elements = h_mod.get_selection()
print(unreal.SystemLibrary.get_engine_version())
# UE5 exposes the controller via a getter; UE4 as a property.
if (unreal.SystemLibrary.get_engine_version()[0] == '5'):
    c = rig.get_controller()
else:
    c = rig.controller
g = c.get_graph()
n = g.get_nodes()
# Morph target names drive both the curve pins and the control names.
mesh = rig.get_preview_mesh()
morphList = mesh.get_all_morph_target_names()
morphListWithNo = morphList[:]
morphListRenamed = []
morphListRenamed.clear()
for i in range(len(morphList)):
    morphListWithNo[i] = '{}'.format(morphList[i])
print(morphListWithNo)
###### root
# Ensure the root space exists; reuse it (via its key) when already present.
key = unreal.RigElementKey(unreal.RigElementType.SPACE, 'MorphControlRoot_s')
space = h_mod.get_space(key)
if (space.get_editor_property('index') < 0):
    space = h_mod.add_space('MorphControlRoot_s', space_type=unreal.RigSpaceType.SPACE)
else:
    space = key
a:unreal.RigUnit_CollectionItems = unreal.RigUnit_CollectionItems()
print(a)
# Add array nodes. (Original comment was mojibake; likely Japanese
# "配列ノード追加" = "add array nodes" — TODO confirm.)
# NOTE(review): these three hold lists of pins, not a single RigVMStructNode;
# the annotations look wrong but are kept as-is.
values_forCurve:unreal.RigVMStructNode = []
items_forControl:unreal.RigVMStructNode = []
items_forCurve:unreal.RigVMStructNode = []
# Scan the rig graph once, collecting the pins we need to rewrite.
for node in n:
    print(node)
    print(node.get_node_title())
    # set curve num
    if (node.get_node_title() == 'For Loop'):
        print(node)
        pin = node.find_pin('Count')
        print(pin)
        c.set_pin_default_value(pin.get_pin_path(), str(len(morphList)))
    # curve name array pin
    if (node.get_node_title() == 'Select'):
        print(node)
        pin = node.find_pin('Values')
        #print(pin)
        #print(pin.get_array_size())
        #print(pin.get_default_value())
        values_forCurve.append(pin)
    # items
    # Split 'Items' nodes into curve collections vs control collections by
    # inspecting their current default value.
    if (node.get_node_title() == 'Items'):
        if ("Type=Curve," in c.get_pin_default_value(node.find_pin('Items').get_pin_path())):
            items_forCurve.append(node.find_pin('Items'))
        else:
            items_forControl.append(node.find_pin('Items'))
print(values_forCurve)
# reset controller: delete every existing control parented to the morph root
# space so they can be recreated from the current morph list.
for e in reversed(h_mod.get_elements()):
    if (e.type!= unreal.RigElementType.CONTROL):
        continue
    tmp = h_mod.get_control(e)
    if (tmp.space_name == 'MorphControlRoot_s'):
        #if (str(e.name).rstrip('_c') in morphList):
        #    continue
        print('delete')
        #print(str(e.name))
        h_mod.remove_element(e)
# curve array: refill each 'Select.Values' pin with the morph names.
for v in values_forCurve:
    c.clear_array_pin(v.get_pin_path())
    for morph in morphList:
        tmp = "{}".format(morph)
        c.add_array_pin(v.get_pin_path(), default_value=tmp)
# curve controller: create one hidden float control per morph target.
for morph in morphListWithNo:
    name_c = "{}_c".format(morph)
    key = unreal.RigElementKey(unreal.RigElementType.CONTROL, name_c)
    try:
        # Reuse the existing control when present; otherwise create it.
        control = h_mod.get_control(key)
        if (control.get_editor_property('index') < 0):
            k = h_mod.add_control(name_c,
                control_type=unreal.RigControlType.FLOAT,
                space_name=space.name,
                gizmo_color=[1.0, 0.0, 0.0, 1.0],
                )
            control = h_mod.get_control(k)
    except:
        # get_control raised (control missing) — create it instead.
        k = h_mod.add_control(name_c,
            control_type=unreal.RigControlType.FLOAT,
            space_name=space.name,
            gizmo_color=[1.0, 0.0, 0.0, 1.0],
            )
        control = h_mod.get_control(k)
    # Morph controls are value-only: hide and disable their gizmos.
    control.set_editor_property('gizmo_visible', False)
    control.set_editor_property('gizmo_enabled', False)
    h_mod.set_control(control)
    morphListRenamed.append(control.get_editor_property('name'))
    # Optional per-control save for debugging (-debugeachsave 1).
    if (args.debugeachsave == '1'):
        try:
            unreal.EditorAssetLibrary.save_loaded_asset(rig)
        except:
            print('save error')
#unreal.SystemLibrary.collect_garbage()
# curve Control array: point the control-collection 'Items' pins at the
# freshly created controls.
for v in items_forControl:
    c.clear_array_pin(v.get_pin_path())
    for morph in morphListRenamed:
        tmp = '(Type=Control,Name='
        tmp += "{}".format(morph)
        tmp += ')'
        c.add_array_pin(v.get_pin_path(), default_value=tmp)
# curve Float array: point the curve-collection 'Items' pins at the morphs.
for v in items_forCurve:
    c.clear_array_pin(v.get_pin_path())
    for morph in morphList:
        tmp = '(Type=Curve,Name='
        tmp += "{}".format(morph)
        tmp += ')'
        c.add_array_pin(v.get_pin_path(), default_value=tmp)
|
# from wrmd_sim.wrmd_similarity import find_dist
# import speech_recognition as sr
# flag = 1
# while (flag):
# r = sr.Recognizer()
# with sr.Microphone() as source:
# r.adjust_for_ambient_noise(source)
# print("Please say something")
# audio = r.listen(source)
# print("Recognizing Now .... ")
# # recognize speech using google
# try:
# print("\nYou have said: \n" + r.recognize_google(audio) + "\n")
# user_input = input("Press\n- 0, if this is incorrec/project/- 1, if this is correct ")
# if int(user_input) == 0:
# flag = 1
# elif int(user_input) == 1:
# flag = 0
# except Exception as e:
# print("Error : " + str(e))
# # write audio
# with open("recorded.wav", "wb") as f:
# f.write(audio.get_wav_data())
import unreal
import sng_parser
import re
from sentence_transformers import SentenceTransformer, util
import torch
from rdflib.namespace import FOAF, XMLNS, XSD, RDF, RDFS, OWL
import rdflib.plugins.sparql as sparql
from rdflib import Graph, URIRef, Literal, BNode, Namespace
import networkx as nx
import matplotlib.pyplot as plt
import sng_parser
# Path to the already-populated assets knowledge graph (Turtle format).
populated_kg_path = "/project/.ttl"
# Update KG of assets
#################################################
import os
import json
# find if directory is '../Megascans Library/project/' or '../Megascans Library/UAssets'
path_to_files = '/project/'
from rdflib import Graph, URIRef, Literal, BNode, Namespace
from rdflib.namespace import FOAF, XMLNS, XSD, RDF, RDFS, OWL
import os  # NOTE(review): duplicate import (already imported above); harmless
# Module-level graph and namespace shared by trigger_update()/update_KG().
g = Graph()
g.parse(populated_kg_path, format = "ttl")
n = Namespace("http://www.semanticweb.org/project/-assets-ontology#")
# find if directory is '../Megascans Library/project/' or '../Megascans Library/UAssets'
def trigger_update(folder_path):
    """Return {asset ID: json path} for on-disk assets not yet in the KG.

    Queries the module-level graph ``g`` for asset IDs already loaded, then
    scans ``folder_path`` — one sub-folder per asset, each expected to
    contain '<id>.json' — and reports only assets missing from the graph.

    :param str folder_path: Root directory containing the asset folders.
    :return: Mapping of new asset IDs to their json file paths.
    :rtype: dict
    """
    qres = g.query(
        """
        PREFIX mega:<http://www.semanticweb.org/project/-assets-ontology#>
        SELECT ?s ?aID ?aName WHERE {
        ?s mega:assetID ?aID.
        ?s mega:assetName ?aName.
        FILTER BOUND(?aID)
        }""")
    # Asset IDs currently in the KG; IDs match the folder / json file names.
    # A set gives O(1) membership tests below.
    cur_KG_assets = {row.aID.toPython() for row in qres}
    saved_asset_paths = dict()
    # sf = asset folder name, which doubles as the asset ID.
    for sf in os.listdir(folder_path):
        asset_dir = os.path.join(folder_path, str(sf))
        try:
            entries = os.listdir(asset_dir)
        except NotADirectoryError:
            # A stray file among the asset folders — just skip it.
            # (Bug fix: the original tested str(e) for the substring
            # 'NotADirectoryError', which exception messages never contain,
            # so every listing error was silently swallowed.)
            continue
        except OSError as e:
            print("Actual error:", e)
            continue
        json_name = str(sf) + '.json'
        if entries and json_name in entries:
            saved_asset_paths[sf] = os.path.join(asset_dir, json_name)
    print("Paths for assets in file system: ", saved_asset_paths)
    # Keep only the assets that are not in the KG yet.
    new_paths_dict = {key: value for key, value in saved_asset_paths.items()
                      if key not in cur_KG_assets}
    print("Assets not yet added to KG:", new_paths_dict)
    return new_paths_dict
def update_KG(new_files_paths):
    """Add the assets described by the given json files to the module graph ``g``.

    For each file: create an Asset resource and attach its name, ID, search
    tags, categories, size/age properties and average colour when present.
    The updated graph is serialized to 'Populated_Assets_KG.ttl' at the end.

    :param dict new_files_paths: {asset ID: path to the asset's json file},
        as produced by trigger_update().
    """
    print("New files path: ", new_files_paths, ' ', len(new_files_paths))
    n = Namespace("http://www.semanticweb.org/project/-assets-ontology#")
    json_files = list(new_files_paths.values())
    for fil in json_files:
        try:
            # `with` guarantees the handle is closed even on error (the
            # original's f.close() was skipped whenever an exception fired,
            # leaking the file handle).
            with open(fil) as f:
                data = json.load(f)  # json file loaded as dictionary
            # Create the asset resource; non-alphanumerics in the name are
            # normalized to underscores for use in the URI.
            name = data["name"]
            name_processed = re.sub("[^a-zA-Z0-9]", "_", name)
            print(name_processed)
            asset = URIRef(str(n) + "Asset_" + name_processed)
            g.add((asset, RDF.type, n.Asset))
            g.add((asset, n.assetName, Literal(name, datatype=XSD.string)))
            try:
                # Set asset ID (renamed from `id`, which shadowed the builtin).
                asset_id = Literal(str(data["id"]), datatype=XSD.string)
                g.add((asset, n.assetID, asset_id))
                print("Added assetID = ", asset_id)
            except KeyError:
                print("No asset ID specified for asset", asset)
            try:
                # Set asset search tags
                for t in data["tags"]:
                    g.add((asset, n.assetTag, Literal(str(t), datatype=XSD.string)))
            except KeyError:
                print("No search tags specified for asset", asset)
            try:
                # Assign asset categories
                for cat in data["categories"]:
                    g.add((asset, n.assetCategory, Literal(cat, datatype=XSD.string)))
            except KeyError:
                print("No categories specified for asset", asset)
            try:
                # Size/age live in a list of {"key": ..., "value": ...} dicts.
                # (Bug fix: the original ran a dead duplicate of this loop
                # *before* the try, whose results were never used, and
                # accessed data["properties"] outside any inner try — a
                # missing key aborted the whole asset, skipping avg colour.)
                for prop in data["properties"]:
                    if prop["key"] == "size":
                        size = prop["value"]
                        if size == "tiny":
                            g.add((asset, n.assetSize, n.tiny))
                        elif size == "small":
                            g.add((asset, n.assetSize, n.small))
                        elif size == "medium":
                            g.add((asset, n.assetSize, n.medium))
                        elif size == "large":
                            g.add((asset, n.assetSize, n.large))
                        else:
                            # Anything else is treated as extra large.
                            g.add((asset, n.assetSize, n.extra_large))
                    elif prop["key"] == "age":
                        g.add((asset, n.assetAge, Literal(prop["value"], datatype=XSD.string)))
            except Exception:
                print("No properties specified for asset", asset)
            try:
                # Set asset average color
                avg_color = Literal(str(data["averageColor"]), datatype=XSD.string)
                g.add((asset, n.assetAvgColor, avg_color))
            except KeyError:
                print("No asset avg color specified for asset", asset)
            print(len(g))
        except Exception as e:
            # Best-effort per file: report and move on to the next asset.
            print('Update error: ', e)
    g.serialize(destination='Populated_Assets_KG.ttl', format='turtle')
#################################################
def find_dist_cosine(sen_a, sen_b):
    """Return the cosine similarity between two sentence-embedding tensors."""
    similarity = util.cos_sim(sen_a, sen_b)
    return similarity
def sub_closest_prep(preposition):
    """Map an arbitrary preposition onto the closest supported one.

    Compares sentence embeddings by cosine similarity and returns the
    supported preposition with the highest score.
    """
    supported = ["on","back","front","right","left","behind","beneath","in","over","under","besides"]
    best_score = -1000  # lower than any cosine similarity
    best_prep = ""
    for candidate in supported:
        score = find_dist_cosine(create_embedding_tensor(candidate), create_embedding_tensor(preposition))
        if score > best_score:
            best_score = score
            best_prep = ""+candidate
    print("Closest preposition for ", preposition, " is ", best_prep)
    return best_prep
def find_dist(sen_a, sen_b, model_name):
    """Word Mover's Distance between two sentences using a saved FastText model.

    NOTE(review): `FastText` is not imported anywhere in this file (gensim's
    `from gensim.models import FastText` is missing), so calling this raises
    NameError — confirm and add the import, or remove this unused helper.
    Lower distance means closer sentences.
    """
    loaded_model = FastText.load(model_name)
    wv = loaded_model.wv
    distance = wv.wmdistance(sen_a, sen_b)
    # print(f"Word Movers Distance is {distance} (lower means closer)")
    return distance
# Module-level counter and Stanford CoreNLP jar locations.
# NOTE(review): neither the jars nor N_id are referenced in the visible code
# — presumably used elsewhere; verify before removing.
N_id = 1
jar_path = 'stanford-corenlp-4.5.1/stanford-corenlp-4.5.1.jar'
models_jar_path = 'stanford-corenlp-4.5.1/stanford-corenlp-4.5.1-models.jar'
def formatAssetWriting(entity_asset_tuple, subject_all_objects_dict):
    """Rewrite entity labels in a subject->objects relation dict to asset labels.

    Every label has the form '<entity name>_<entityID>'. Wherever the entity
    name equals entity_asset_tuple[0], the name part is replaced with
    '<AssetName>_<AssetID>' (spaces in the asset name become underscores)
    while the trailing '_<entityID>' suffix is preserved.

    Example:
        entity_asset_tuple = ('Cow', 'uknkaffaw', 'Wooden_Chair')
        {'Cow_0': [('on', 'sidewalk_1')], 'city area_2': [('near', 'Cow_3')]}
          -> {'Wooden_Chair_uknkaffaw_0': [('on', 'sidewalk_1')],
              'city area_2': [('near', 'Wooden_Chair_uknkaffaw_3')]}

    Bug fix: the original kept only the *last* computed value per subject
    (dropping all other relation tuples) and, when nothing matched, wrapped
    the whole relation list in another list — contradicting its own example.
    All relation tuples are now preserved.

    :param tuple entity_asset_tuple: (entity name, asset ID, asset name).
    :param dict subject_all_objects_dict: {subject_label: [(preposition, object_label), ...]}.
    :return: New dict with matching labels substituted.
    :rtype: dict
    """
    entity_name = entity_asset_tuple[0]
    asset_label = entity_asset_tuple[2].replace(' ', '_') + '_' + entity_asset_tuple[1]

    def _substitute(label):
        # Replace '<entity>_<id>' with '<asset_label>_<id>' when the entity matches.
        suffix_at = label.rfind('_')
        if label[:suffix_at] == entity_name:
            return asset_label + label[suffix_at:]
        return label

    formattedDict = dict()
    for subject, relations in subject_all_objects_dict.items():
        new_subject = _substitute(subject)
        new_relations = [(prep, _substitute(obj)) for prep, obj in relations]
        # Merge in case two subjects collapse onto the same asset label.
        formattedDict.setdefault(new_subject, []).extend(new_relations)
    return formattedDict
def subjectAllObjectsDict(scenegraph):
    """Build {subject_label: [(relation, object_label), ...]} from a parsed scene graph.

    Labels have the form '<head>_<entityIndex>'.
    e.g. "A green leaf sits on a gray boulder and under a pebble."
         -> {'leaf_0': [('on', 'boulder_1'), ('under', 'pebble_2')]}

    Entities that take part in no relation are added with a fresh index and a
    single ('', '') placeholder. (Bug fix: the original membership test
    compared *bare* heads against the suffixed keys and against lists of
    tuples, so it never matched and every entity — related or not — was
    duplicated as an isolated entry.)

    :param dict scenegraph: sng_parser output with "entities" and "relations".
    :return: Mapping of subject labels to their (relation, object label) pairs.
    :rtype: dict
    """
    print("Obtained scene graph:", scenegraph)
    heads = [entity["head"] for entity in scenegraph["entities"]]
    print("Entities List: ", scenegraph["entities"])
    relations_list = scenegraph["relations"]
    print("relations", relations_list)
    subject_objects_dict = dict()
    used_indices = set()  # entity indices that appear in any relation
    max_ID = 0
    print('Printing relations: ', '\n')
    for relation in relations_list:
        subj_idx = relation["subject"]
        obj_idx = relation["object"]
        used_indices.update((subj_idx, obj_idx))
        max_ID = max(max_ID, subj_idx, obj_idx)
        subject = heads[subj_idx] + '_' + str(subj_idx)
        relation_object = (relation['relation'], heads[obj_idx] + '_' + str(obj_idx))
        subject_objects_dict.setdefault(subject, []).append(relation_object)
        print("Setting key-value ", subject,
              ": ", subject_objects_dict[subject])
    # Entities with no relation get a fresh index past the highest seen one.
    for idx, head in enumerate(heads):
        if idx not in used_indices:
            max_ID += 1
            print("No relation found.")
            subject_objects_dict[head + '_' + str(max_ID)] = [("", "")]
    return subject_objects_dict
def scenegraphGraph(scenegraph):
    """Build and display a networkx digraph of the parsed scene graph.

    Nodes are entity heads and their modifiers; labelled edges are the
    relations. Blocks on plt.show() until the window is closed.
    Returns {head: [modifier, ...]} for the downstream similarity step.
    """
    G = nx.DiGraph()
    objects = []
    similarity_input = {}
    # HEAD: one node per entity head.
    entities_list = scenegraph["entities"]
    for entity in entities_list:
        val = entity["head"]
        similarity_input[val] = []
        objects.append(val)
        G.add_node(val)
    # MODIFIERS: edge head -> modifier, and record the modifier text.
    for entity in entities_list:
        for modifier_dict in entity["modifiers"]:
            modifier = modifier_dict.get("span")
            G.add_node(modifier)
            object = entity["head"]
            G.add_edge(object, modifier)
            similarity_input[object].append(modifier)
    # RELATIONS: labelled edges between the subject and object heads.
    relations_list = scenegraph["relations"]
    print('Printing relations: ', '\n')
    for relation in relations_list:
        print(relation)
        G.add_edge(objects[relation.get("subject")], objects[relation.get(
            "object")], label=relation.get("relation"))
    pos = nx.spring_layout(G)
    node_size = 800
    # NOTE(review): `pos` is passed to the edge-label drawing below but NOT
    # to nx.draw itself, so edge labels may not line up with nodes — confirm.
    nx.draw(G, with_labels=True, node_size=node_size)
    edge_labels = nx.get_edge_attributes(G, "label")
    label_pos = 0.5
    nx.draw_networkx_edge_labels(
        G, pos, edge_labels=edge_labels, label_pos=label_pos)
    plt.show()
    return similarity_input
def spanList(scenegraph):
    """Print the lemma span of every entity in the parsed scene graph."""
    for item in scenegraph["entities"]:
        print(item["lemma_span"])
def scenegraphTable(sentence):
    """Parse a sentence into a scene graph, print/plot it, and return
    (modifier dict from scenegraphGraph, raw parser output)."""
    # Here we just use the default parser.
    parsed = sng_parser.parse(sentence)
    print('Default Parser Output: \n', parsed)
    sng_parser.tprint(parsed)
    similarity_input = scenegraphGraph(parsed)
    return similarity_input, parsed
# Scene graph parsing completed here
#########################################################
# Module-level, import-time side effects: load the sentence-embedding model
# (may download weights on first use) and parse the populated KG from disk.
print("Loading SentenceTransformer Model...")
sentence_model = SentenceTransformer('all-MiniLM-L6-v2')
print("Loaded.")
g = Graph()
g.parse(populated_kg_path, format="ttl")
n = Namespace("http://www.semanticweb.org/project/-assets-ontology#")
#######################################################################
# util functions
def create_embedding_tensor(text):
    """Encode text into an embedding using the module-level sentence model."""
    return sentence_model.encode(text)
def str_to_tensor(string):
    """Parse a comma-separated string of floats back into a 1-D torch tensor.

    Inverse of tensor_to_str().
    """
    floats = [float(part) for part in string.split(',')]
    return torch.tensor(floats)
def tensor_to_str(tensor):
    """Serialize a tensor's values to a comma-separated string.

    Inverse of str_to_tensor(). Uses str.join instead of the original
    quadratic string concatenation (repeated `+=` in a loop), which also
    removes the trailing-comma trim step.
    """
    return ','.join(str(value) for value in tensor.tolist())
def concatAssetIntoSen(KG_info):
    """Collapse per-asset KG dicts into one descriptive sentence per asset ID.

    KG_info is [assetID dict, asset Names dict, asset Tags dict, asset Sizes
    dict], each keyed by the full asset URI. The returned dict is keyed by
    the short ID (text after the URI's last underscore) with values like
    'small sized Wooden Chair oak rustic'.
    """
    assetSentenceForm = dict()
    try:
        # Names first: '<uri>...Asset_<id>' -> '<name>'
        for uri, asset_name in list(KG_info[1].items()):
            assetSentenceForm[uri[uri.rfind('_') + 1:]] = "" + asset_name
    except Exception as e:
        print("Name adding unsuccessful", e)
    try:
        # Append the tag sentence after the name.
        for uri, tag_sentence in list(KG_info[2].items()):
            assetSentenceForm[uri[uri.rfind('_') + 1:]] += ' ' + tag_sentence
    except:
        print("Tag adding unsuccessful")
    try:
        # Prefix the size, e.g. 'small sized <name> <tags>'.
        for uri, size_uri in list(KG_info[3].items()):
            short_id = uri[uri.rfind('_') + 1:]
            assetSentenceForm[short_id] = size_uri[size_uri.rfind('#') + 1:] + ' sized ' + \
                assetSentenceForm[short_id]
    except:
        print("Size adding unsuccessful")
    return assetSentenceForm
def find_similar_asset_cos(in_asset_desc_pair, KG_info):
    """Match one described entity to the most similar KG asset by embedding cosine.

    :param dict in_asset_desc_pair: single-entry {entity name: textual description}.
    :param KG_info: [assetID dict, asset Names dict, asset Tags dict, asset
        Sizes dict], each keyed by the full asset URI.
    :return: (entity name, matched asset ID, matched asset name).
    """
    wrmd_scores = []
    in_asset_Name = list(in_asset_desc_pair.keys())[0]
    in_asset_Desc = list(in_asset_desc_pair.values())[0]
    # KG_info -> [assetID dict, asset Names dict, asset Tags dict, asset Sizes dict]
    # Flatten each asset into one descriptive sentence keyed by short ID.
    KG_info_dict = concatAssetIntoSen(KG_info)
    print()
    KG_sentences_vals = list(KG_info_dict.values())
    # Cosine similarity between the query description and every asset sentence.
    for val in KG_sentences_vals:
        wrmd_scores.append(find_dist_cosine(create_embedding_tensor(
            in_asset_Desc), create_embedding_tensor(val)))
    # Find the pair with the highest cosine similarity score
    max_score = -1
    max_index = -1
    for i in range(len(wrmd_scores)):
        if wrmd_scores[i] > max_score:
            max_score = wrmd_scores[i]
            max_index = i
    # Map the winning sentence back to its short asset ID.
    closest_asset_ID = ""
    for id, sen in KG_info_dict.items():
        if sen == KG_sentences_vals[max_index]:
            closest_asset_ID = id
            break
    # The four lookups below only probe the dicts so a missing entry gets
    # reported; their results are deliberately discarded.
    try:
        KG_info[0][str(
            'http://www.semanticweb.org/project/-assets-ontology#Asset_') + closest_asset_ID]
    except Exception as e:
        print("e for asset ID: ", e)
    try:
        KG_info[1][str(
            'http://www.semanticweb.org/project/-assets-ontology#Asset_') + closest_asset_ID]
    except Exception as e:
        print("e for asset Name: ", e)
    try:
        KG_info[2][str(
            'http://www.semanticweb.org/project/-assets-ontology#Asset_') + closest_asset_ID]
    except Exception as e:
        print("e for asset Tags: ", e)
    try:
        KG_info[3][str(
            'http://www.semanticweb.org/project/-assets-ontology#Asset_') + closest_asset_ID]
    except Exception as e:
        print("e for asset Size: ", e)
    # now return the entity name against which the match was made, plus the
    # asset ID and Name for file-writing formatting.
    return in_asset_Name, KG_info[0][str('http://www.semanticweb.org/project/-assets-ontology#Asset_') + closest_asset_ID], KG_info[1][str('http://www.semanticweb.org/project/-assets-ontology#Asset_') + closest_asset_ID]
def find_similar_asset(input_string, KG_info):
    """Find the KG asset whose stored embedding is most cosine-similar to the input.

    KG_info is (tensor dict, ID dict, name dict), each keyed by asset URI.
    Returns the matching (tensor, ID, name) triple.
    """
    tensor_dict = KG_info[0]
    tensor_values = list(tensor_dict.values())
    scores = [util.cos_sim(input_string, candidate) for candidate in tensor_values]
    # Locate the highest-scoring candidate.
    best_score = -1
    best_index = -1
    for idx in range(len(scores)):
        if scores[idx] > best_score:
            best_score = scores[idx]
            best_index = idx
    # Map the winning tensor back to its asset URI.
    closest_asset_ID = ""
    for uri, tensor in tensor_dict.items():
        if torch.equal(tensor, tensor_values[best_index]):
            closest_asset_ID = uri
            break
    return KG_info[0][closest_asset_ID], KG_info[1][closest_asset_ID], KG_info[2][closest_asset_ID]
#######################################################################
# form embeddings for entities mentioned in input
def scene_graph_list(sceneGraph_dict):
    """Turn {entity: [modifiers]} into 'mod1 mod2 ... entity' description strings.

    Returns (list of all description strings, {entity: description}) — used
    later to match each described entity against the asset KG.
    """
    print("\nSCENE GRAPH DICT\n:", sceneGraph_dict)
    descriptions = []
    description_by_entity = dict()
    for entity, modifiers in sceneGraph_dict.items():
        # Modifiers first, each followed by a space, then the entity head.
        prefix = ''.join(modifier + ' ' for modifier in modifiers)
        full_description = prefix + entity
        descriptions.append(full_description)
        description_by_entity[entity] = full_description
    return descriptions, description_by_entity
def scene_graph_tensors(sceneGraph_dict):
    """Like scene_graph_list, but embed each 'mod1 mod2 ... entity' description.

    Returns (list of embedding tensors, {entity: embedding tensor}) for the
    later asset-matching step.
    """
    descriptions = []
    for entity, modifiers in sceneGraph_dict.items():
        descriptions.append(''.join(modifier + ' ' for modifier in modifiers) + entity)
    tensors = []
    tensor_by_entity = dict()
    for entity, description in zip(sceneGraph_dict.keys(), descriptions):
        embedding = create_embedding_tensor(description)
        tensors.append(embedding)
        tensor_by_entity[entity] = embedding
    return tensors, tensor_by_entity
def get_asset_concatTags_dict(tags_list):
    """Group (asset, tag) pairs into one space-separated tag sentence per asset.

    Bug fix: the original indexed the dict under construction with the *tag*
    (`asset_concatTags_dict[elem[1]]`), which raised KeyError on the very
    first pair; the tag string itself is what should be concatenated.

    :param tags_list: Iterable of (asset identifier, tag string) pairs.
    :return: {asset: 'tag1 tag2 ...'}
    :rtype: dict
    """
    asset_concatTags_dict = {}
    for asset, tag in tags_list:
        if asset in asset_concatTags_dict:
            asset_concatTags_dict[asset] += ' ' + str(tag)
        else:
            asset_concatTags_dict[asset] = str(tag)
    print("Finally, assets with all tags in sentence form:", asset_concatTags_dict)
    return asset_concatTags_dict
def get_KG_assets():
    """Query the module-level graph for every asset's ID, name, tags and size.

    Returns four dicts keyed by the full asset URI:
    ({uri: ID}, {uri: name}, {uri: 'tag1 tag2 ...'}, {uri: size URI}).
    """
    qres = g.query(
        """
        PREFIX mega:<http://www.semanticweb.org/project/-assets-ontology#>
        SELECT ?s ?aID ?aName ?aTag ?aSize
        WHERE
        {
        {?s mega:assetTag ?aTag}
        UNION
        {?s mega:assetID ?aID}
        UNION
        {?s mega:assetName ?aName}
        UNION
        {?s mega:assetSize ?aSize}
        }
        """)
    asset_Tag_dict = {}
    asset_ID_dict = {}
    asset_Name_dict = {}
    asset_Size_dict = {}
    count = 0  # rows where a binding was absent; diagnostic only, never read
    for row in qres:
        # Each UNION arm binds only one of ?aID/?aName/?aTag/?aSize; the
        # other accesses raise and are skipped by the except branches.
        try:
            asset_ID_dict[row.s.toPython()] = row.aID.toPython()
        except Exception as e:
            count += 1
        try:
            asset_Name_dict[row.s.toPython()] = row.aName.toPython()
        except Exception as e:
            count += 1
        try:
            asset_Size_dict[row.s.toPython()] = row.aSize.toPython()
        except Exception as e:
            count += 1
        try:
            if row.aTag is not None:
                # Append to the asset's tag sentence when it already exists.
                asset_Tag_dict[row.s.toPython()] += ' ' + row.aTag.toPython()
        except Exception as e:
            # First tag for this asset: the += lookup raised KeyError.
            if row.aTag is not None:
                asset_Tag_dict[row.s.toPython()] = row.aTag.toPython()
            count += 1
    return asset_ID_dict, asset_Name_dict, asset_Tag_dict, asset_Size_dict
def get_KG_asset_tensors():
    """Query the RDF knowledge graph for asset embedding tensors, IDs, and names.

    Returns:
        (asset_Tensor_dict, asset_ID_dict, asset_Name_dict), each keyed by the
        asset subject string. Tensor literals are decoded via str_to_tensor
        (defined elsewhere in this module).

    NOTE(review): relies on a module-level rdflib Graph `g` not visible here.
    Each UNION row binds only one of ?aTensor/?aID/?aName; the unbound
    variables are None, so `.toPython()` raises and the matching except just
    counts and skips — deliberate exception-driven flow.
    """
    qres = g.query(
        """
    PREFIX mega:<http://www.semanticweb.org/project/-assets-ontology#>
    SELECT ?s ?aID ?aName ?aTensor
    WHERE
    {
    {?s mega:assetTensor ?aTensor}
    UNION
    {?s mega:assetID ?aID}
    UNION
    {?s mega:assetName ?aName}
    }
    """)
    asset_Tensor_dict = {}
    asset_ID_dict = {}
    asset_Name_dict = {}
    count = 0  # number of skipped (unbound) fields; diagnostic only
    for row in qres:
        try:
            asset_Tensor_dict[row.s.toPython()] = str_to_tensor(
                row.aTensor.toPython())
        except Exception as e:
            count += 1
        try:
            asset_ID_dict[row.s.toPython()] = row.aID.toPython()
        except Exception as e:
            count += 1
        try:
            asset_Name_dict[row.s.toPython()] = row.aName.toPython()
        except Exception as e:
            count += 1
    return asset_Tensor_dict, asset_ID_dict, asset_Name_dict
def entity_sub_id(entity, all_assets_dicts):
    """Map an entity label like 'Cow_0' to its asset name with the same numeric
    suffix, e.g. 'Wooden_Chair_uknkaffaw_0'.

    Args:
        entity: either an 'name_id' string or a (prep, 'name_id') tuple, in
            which case the second element is used.
        all_assets_dicts: mapping of base entity name -> asset label.

    Returns:
        The asset label with the entity's '_<id>' suffix appended, or None
        when the base name (text before the last underscore) is not a key.
    """
    print("entity = ", entity)
    print("all_assets_dict = ", all_assets_dicts)
    # BUG FIX: the original compared `type(entity) == 'tuple'` — a type object
    # against a string — which is always False, so tuple inputs fell through
    # to the string path and crashed on .rfind(). Use isinstance instead.
    if isinstance(entity, tuple):
        entity1 = entity[1]
    else:
        entity1 = entity
    suffix_pos = entity1.rfind('_')
    base = entity1[:suffix_pos]
    if base in all_assets_dicts:
        return all_assets_dicts[base] + entity1[suffix_pos:]
    return None
def write_all_prep_to_file(all_chosen_assets, subj_obj_dict):
    """Rewrite the subject->relations dict so every entity label is replaced by
    its chosen asset label (via entity_sub_id).

    Args:
        all_chosen_assets: mapping of base entity name -> 'AssetName_assetID'.
        subj_obj_dict: {'Cow_0': [('on', 'sidewalk_1'), ...], ...}; an entry
            whose first pair is ("", "") marks a subject with no relations.

    Returns:
        dict keyed by asset-substituted subject labels, values are lists of
        (preposition, asset-substituted object label) pairs, or [("", "")].

    NOTE(review): when entity_sub_id finds no match it returns None, which
    then becomes a dict key here — confirm callers tolerate a None key.
    """
    print("subj_obj_dict = ", subj_obj_dict)
    print("all_chosen_assets = ", all_chosen_assets)
    formattedDict = dict()
    # copy and edit subj_obj_dict
    # k = Cow_0, v= [('on', 'sidewalk_1'), ('in', 'city area_2')]
    for k, v in subj_obj_dict.items():
        # A subject with no real relations carries the single placeholder
        # pair ("", ""); only non-placeholder lists are substituted.
        # if v[0] == ("", "")
        if len(v[0][0]) != 0 and len(v[0][1]) != 0:
            print("k,v pair = ", k, '\t', v, '\n')
            # try:
            if entity_sub_id(k, all_chosen_assets) in list(formattedDict.keys()):
                for elem in v:
                    print("Inner tuple: ", elem)
                    formattedDict[entity_sub_id(k, all_chosen_assets)].append(
                        (elem[0], entity_sub_id(elem[1], all_chosen_assets)))
            else:
                # NOTE(review): this branch re-assigns a one-element list on
                # every iteration, so only the LAST pair of v survives for a
                # new key — presumably the first iteration should create the
                # list and the rest append; confirm intended behavior.
                for elem in v:
                    print("Inner tuple: ", elem)
                    # if elem[0]
                    formattedDict[entity_sub_id(k, all_chosen_assets)] = [(
                        elem[0], entity_sub_id(elem[1], all_chosen_assets))]
        else:
            formattedDict[entity_sub_id(k, all_chosen_assets)] = [("", "")]
            # formattedDict[k] = ("", "")
    # except Exception as e:
    #     print("e = ", e, " when k = ", k, " v = ", v)
    print("final Formatted Dict = ", formattedDict)
    # In the new dict, store in subject_all_objects_dict values, but format its keys to
    # AssetName_AssetID (entity_asset_dict.keys) +_+<entityID> (subject_all_objects_dict.keys)
    # Compare - from subject_all_objects_dict, remove from keys and value tuples, everything after & including the last '_'
    # - replace remaining keys & values in subject_all_objects_dict with values of keys that match with them
    # EXAMPLE
    # Entity Asset Dictionary:
    # {'Cow': 'Wooden_Chair_uknkaffaw', 'sidewalk': 'Brick_Wall_vgvmcgf', 'city area': 'Brick_Wall_vgvmcgf', 'shops': 'Wooden_Chair_uknkaffaw'}
    # All Subject-Objects Dictionary:
    # {'Cow_0': [('on', 'sidewalk_1'), ('in', 'city area_2')], 'city area_2': [('near', 'shops_3')]}
    # RESULT
    # {'Wooden_Chair_uknkaffaw_0': [('on', 'Brick_Wall_vgvmcgf_1'), ('in', 'Brick_Wall_vgvmcgf_2')], 'Brick_Wall_vgvmcgf_2': [('near', 'Wooden_Chair_uknkaffaw_3')]}
    # fil = open("/project/.txt", "a")
    # for k, v in list(formattedDict.items()):
    #     for obj in v:
    #         print("writing tuple - ", obj)
    #         fil.write(obj[1] + '\n')
    #         fil.write(obj[0] + '\n')
    #         fil.write(k + '\n')
    #         fil.write('\n')
    # print("Written to file")
    return formattedDict
# Lady_Fern_wdvlditia_3
# near
# Mossy_Boulder_ulldfii_2
def write_to_file(chosen_asset, subj_obj_dict):
    """Append the chosen asset's 'Name_ID' line to the spawn-list file.

    Args:
        chosen_asset: sequence where [1] is the asset id and [2] the asset name.
        subj_obj_dict: forwarded to formatAssetWriting.
    """
    # NOTE(review): formatted_writing is computed but never used — kept in case
    # formatAssetWriting has side effects; confirm whether it can be removed.
    formatted_writing = formatAssetWriting(chosen_asset, subj_obj_dict)
    line = re.sub(" ", "_", chosen_asset[2])
    line += "_" + chosen_asset[1]
    # BUG FIX: the file handle was never closed on an exception; a context
    # manager guarantees the append is flushed and the handle released.
    with open("/project/.txt", "a") as fil:
        fil.write(line)
        fil.write("\n\n\n")
def match_KG_input_cos(KG_info, desc_dict, subj_obj_dict):
    """For each described entity, pick the closest knowledge-graph asset by
    cosine similarity and record it under both the raw KG key and the input
    entity name.

    Returns:
        (entity_asset_dict, formatted spawning dict from write_all_prep_to_file).
    """
    entity_asset_dict = dict()
    all_chosen_assets = dict()
    for entity, description in desc_dict.items():
        # Match one entity at a time against the KG.
        match = find_similar_asset_cos({entity: description}, KG_info)
        print("Chosen asset = ", match)
        # 'Asset Name' + '_' + assetID, spaces replaced by underscores.
        asset_label = match[2].replace(' ', '_') + '_' + match[1]
        all_chosen_assets[match[0]] = asset_label
        entity_asset_dict[entity] = asset_label
    return entity_asset_dict, write_all_prep_to_file(all_chosen_assets, subj_obj_dict)
def match_KG_input(KG_info, input_tensors):
    """Match each input entity tensor to its closest KG asset and append each
    choice to the spawn-list file.

    Args:
        KG_info: knowledge-graph lookup structure for find_similar_asset.
        input_tensors: pair where [1] maps entity name -> embedding tensor.

    Returns:
        {entity: 'AssetName_assetID'}.
    """
    entity_asset_dict = dict()
    for k, v in input_tensors[1].items():
        chosen_asset = find_similar_asset(v, KG_info)
        entity_asset_dict[k] = re.sub(
            " ", "_", chosen_asset[1]) + "_" + chosen_asset[0]
        # BUG FIX: write_to_file takes (chosen_asset, subj_obj_dict); the
        # original one-argument call always raised TypeError. No relation data
        # exists on this path, so an empty dict is passed.
        write_to_file(chosen_asset, {})
    return entity_asset_dict
def spawn_object_behind(first_actor, second_actor):
    """Place second_actor behind first_actor: shift along -X by the sum of both
    meshes' X half-extents plus a 200-unit gap."""
    print("In BEHIND Function")
    # Viewport camera info is queried as in the original flow (result unused).
    camera_location, camera_rotation = unreal.EditorLevelLibrary.get_level_viewport_camera_info()
    #camera_rotation.get_forward_vector()
    anchor = first_actor.get_actor_location()
    # X half-extent of each actor's static-mesh bounding box.
    half_a = first_actor.get_component_by_class(unreal.StaticMeshComponent).static_mesh.get_bounds().box_extent.x
    _ = second_actor.get_actor_location()  # fetched as in the original (unused)
    half_b = second_actor.get_component_by_class(unreal.StaticMeshComponent).static_mesh.get_bounds().box_extent.x
    offset = unreal.Vector(half_a + half_b + 200.0, 0, 0)
    second_actor.set_actor_location(anchor - offset, False, True)
def spawn_object_at_back_of(first_actor, second_actor):
    """Place second_actor at the back of first_actor: shift along -X by the sum
    of both meshes' X half-extents (no extra gap)."""
    print("In BACK OF Function")
    anchor = first_actor.get_actor_location()
    # X half-extent of each actor's static-mesh bounding box.
    half_a = first_actor.get_component_by_class(unreal.StaticMeshComponent).static_mesh.get_bounds().box_extent.x
    _ = second_actor.get_actor_location()  # fetched as in the original (unused)
    half_b = second_actor.get_component_by_class(unreal.StaticMeshComponent).static_mesh.get_bounds().box_extent.x
    offset = unreal.Vector(half_a + half_b, 0, 0)
    second_actor.set_actor_location(anchor - offset, False, True)
def spawn_object_in_front_of(first_actor, second_actor):
    """Place second_actor in front of first_actor: shift along +X by the sum of
    both meshes' X half-extents."""
    print("In FRONT OF Function")
    anchor = first_actor.get_actor_location()
    # X half-extent of each actor's static-mesh bounding box.
    half_a = first_actor.get_component_by_class(unreal.StaticMeshComponent).static_mesh.get_bounds().box_extent.x
    _ = second_actor.get_actor_location()  # fetched as in the original (unused)
    half_b = second_actor.get_component_by_class(unreal.StaticMeshComponent).static_mesh.get_bounds().box_extent.x
    offset = unreal.Vector(half_a + half_b, 0, 0)
    second_actor.set_actor_location(anchor + offset, False, True)
def spawn_object_at_right_of(first_actor, second_actor):
    """Place second_actor to the right of first_actor: shift along +Y by the
    sum of both meshes' Y half-extents."""
    print("In RIGHT OF Function")
    anchor = first_actor.get_actor_location()
    # Y half-extent of each actor's static-mesh bounding box.
    half_a = first_actor.get_component_by_class(unreal.StaticMeshComponent).static_mesh.get_bounds().box_extent.y
    _ = second_actor.get_actor_location()  # fetched as in the original (unused)
    half_b = second_actor.get_component_by_class(unreal.StaticMeshComponent).static_mesh.get_bounds().box_extent.y
    offset = unreal.Vector(0, half_a + half_b, 0)
    second_actor.set_actor_location(anchor + offset, False, True)
def spawn_object_at_left_of(first_actor, second_actor):
    """Place second_actor to the left of first_actor: shift along -Y by the sum
    of both meshes' Y half-extents."""
    print("In LEFT OF Function")
    anchor = first_actor.get_actor_location()
    # Y half-extent of each actor's static-mesh bounding box.
    half_a = first_actor.get_component_by_class(unreal.StaticMeshComponent).static_mesh.get_bounds().box_extent.y
    _ = second_actor.get_actor_location()  # fetched as in the original (unused)
    half_b = second_actor.get_component_by_class(unreal.StaticMeshComponent).static_mesh.get_bounds().box_extent.y
    offset = unreal.Vector(0, half_a + half_b, 0)
    second_actor.set_actor_location(anchor - offset, False, True)
def spawn_object_on(first_actor, second_actor):
    """Place second_actor on top of first_actor: shift along +Z by the sum of
    both meshes' Z half-extents."""
    print("In ON OF Function")
    anchor = first_actor.get_actor_location()
    # Z half-extent of each actor's static-mesh bounding box.
    half_a = first_actor.get_component_by_class(unreal.StaticMeshComponent).static_mesh.get_bounds().box_extent.z
    _ = second_actor.get_actor_location()  # fetched as in the original (unused)
    half_b = second_actor.get_component_by_class(unreal.StaticMeshComponent).static_mesh.get_bounds().box_extent.z
    offset = unreal.Vector(0, 0, half_a + half_b)
    second_actor.set_actor_location(anchor + offset, False, True)
def spawn_object_beneath(first_actor, second_actor):
    """'beneath' relation: moves FIRST_actor above second_actor by the sum of
    both meshes' Z half-extents (so the second ends up beneath the first)."""
    print("In BENEATH OF Function")
    _ = first_actor.get_actor_location()  # fetched as in the original (unused)
    # Z half-extent of each actor's static-mesh bounding box.
    half_a = first_actor.get_component_by_class(unreal.StaticMeshComponent).static_mesh.get_bounds().box_extent.z
    anchor = second_actor.get_actor_location()
    half_b = second_actor.get_component_by_class(unreal.StaticMeshComponent).static_mesh.get_bounds().box_extent.z
    offset = unreal.Vector(0, 0, half_a + half_b)
    first_actor.set_actor_location(anchor + offset, False, True)
def spawn_object_in(first_actor, second_actor):
    """Placeholder for the 'in' relation: gathers locations and Z half-extents
    but performs no move — the placement call was commented out in the
    original and is intentionally not executed."""
    print("In IN OF Function")
    _ = first_actor.get_actor_location()
    half_a = first_actor.get_component_by_class(unreal.StaticMeshComponent).static_mesh.get_bounds().box_extent.z
    _ = second_actor.get_actor_location()
    half_b = second_actor.get_component_by_class(unreal.StaticMeshComponent).static_mesh.get_bounds().box_extent.z
    # Original (disabled) placement:
    #vec = unreal.Vector( 0, 0,first_actor_height + second_actor_height)
    #first_actor.set_actor_location(second_actor_loc + vec, False, True)
def spawn_object_over(first_actor, second_actor):
    """Place second_actor hovering over first_actor: shift along +Z by both
    meshes' Z half-extents plus a 250-unit gap."""
    print("In OVER OF Function")
    anchor = first_actor.get_actor_location()
    # Z half-extent of each actor's static-mesh bounding box.
    half_a = first_actor.get_component_by_class(unreal.StaticMeshComponent).static_mesh.get_bounds().box_extent.z
    _ = second_actor.get_actor_location()  # fetched as in the original (unused)
    half_b = second_actor.get_component_by_class(unreal.StaticMeshComponent).static_mesh.get_bounds().box_extent.z
    offset = unreal.Vector(0, 0, half_a + half_b + 250)
    second_actor.set_actor_location(anchor + offset, False, True)
def spawn_object_under(first_actor, second_actor):
    """'under' relation: moves FIRST_actor over second_actor by both meshes' Z
    half-extents plus a 250-unit gap (so the second ends up underneath)."""
    print("In UNDER OF Function")
    _ = first_actor.get_actor_location()  # fetched as in the original (unused)
    # Z half-extent of each actor's static-mesh bounding box.
    half_a = first_actor.get_component_by_class(unreal.StaticMeshComponent).static_mesh.get_bounds().box_extent.z
    anchor = second_actor.get_actor_location()
    half_b = second_actor.get_component_by_class(unreal.StaticMeshComponent).static_mesh.get_bounds().box_extent.z
    offset = unreal.Vector(0, 0, half_a + half_b + 250)
    first_actor.set_actor_location(anchor + offset, False, True)
def spawn_object_besides(first_actor, second_actor):
    """'besides' is treated as 'to the right of'; delegate accordingly."""
    return spawn_object_at_right_of(first_actor, second_actor)
def spawn_assets(asset_dicts):
    """Spawn every subject asset and each of its related objects, positioning
    the object actor relative to the subject according to the preposition.

    Args:
        asset_dicts: {subject_asset_label: [(preposition, object_asset_label),
            ...]}; ("", "") pairs mark subjects with no relations and are
            skipped.
    """
    # Preposition -> placement function (replaces the long if/elif chain).
    placements = {
        "on": spawn_object_on,
        "back": spawn_object_at_back_of,
        "front": spawn_object_in_front_of,
        "right": spawn_object_at_right_of,
        "left": spawn_object_at_left_of,
        "behind": spawn_object_behind,
        "beneath": spawn_object_beneath,
        "in": spawn_object_in,
        "over": spawn_object_over,
        "under": spawn_object_under,
        "besides": spawn_object_besides,
    }
    # BUG FIX: the file handle was opened and never closed. Lines is still
    # read to preserve the original behavior, though it is currently unused.
    with open('/project/.txt', 'r') as file1:
        Lines = file1.readlines()
    for sub, objs in asset_dicts.items():
        print(f"Asset = {sub}")
        prev_actor = spawn_an_asset(sub)
        if prev_actor is None:
            print("first actor didnt spawn")
            continue
        for obj in objs:
            # Skip the ("", "") placeholder relation.
            if obj[0] == "" or obj[1] == "":
                continue
            print(f"Obj1 = {obj[1]}")
            actor = spawn_an_asset(obj[1])
            if actor is not None:
                placement = placements.get(obj[0])
                if placement is not None:
                    placement(prev_actor, actor)
def spawn_an_asset(asset_name):
    """Spawn a blueprint actor and assign it the first StaticMesh asset whose
    name contains 'S_' + the base of asset_name (text before the last '_'),
    then move it 500 units in front of the editor viewport camera at Z=0.

    Returns:
        The spawned actor on the first mesh match, or None when no StaticMesh
        in the registry matches (the blueprint actor is still spawned either
        way — NOTE(review): it is left in the level without a mesh on the
        no-match path; confirm that is intended).
    """
    asset_directory = "/project/"
    world = unreal.EditorLevelLibrary.get_editor_world()
    # Spawn an actor
    actor_location = unreal.Vector(0.0, 0.0, 0.0)
    actor_rotation = unreal.Rotator(0.0, 0.0, 0.0)
    actor_class = unreal.EditorAssetLibrary.load_blueprint_class("/project/")
    actor = unreal.EditorLevelLibrary.spawn_actor_from_class(actor_class, actor_location, actor_rotation)
    asset_registry_module = unreal.AssetRegistryHelpers.get_asset_registry()
    assets = asset_registry_module.get_assets_by_path(asset_directory , recursive=True, include_only_on_disk_assets=False)
    # NOTE(review): `filter` shadows the Python builtin; harmless here but
    # worth renaming in a future change.
    filter = unreal.ARFilter(recursive_paths=True)
    filter.class_names.append("StaticMesh")
    assets = asset_registry_module.run_assets_through_filter(assets, filter)
    #print(f"assets = {assets}")
    # Strip the trailing '_<id>' suffix to get the searchable asset base name.
    f_asset_name = asset_name[:asset_name.rfind('_')]
    for asset in assets:
        asset_str = str(asset.asset_name)
        asset_str = asset_str.strip()
        # Static-mesh naming convention prefixes 'S_'.
        line_str = "S_" + f_asset_name.strip()
        line_str = line_str.strip()
        if asset_str.find(line_str) != -1:
            print("Matched ", asset_str, " to ", line_str)
            print(asset)
            #actor.static_mesh_component.set_static_mesh(static_mesh)
            static_mesh = unreal.EditorAssetLibrary.load_asset(str(asset.object_path))
            static_mesh_component = actor.get_component_by_class(unreal.StaticMeshComponent)
            # Set the static mesh of the component
            static_mesh_component.set_static_mesh(static_mesh)
            editor_world = unreal.EditorLevelLibrary.get_editor_world()
            if editor_world:
                print("Hello")
                camera_location, camera_rotation = unreal.EditorLevelLibrary.get_level_viewport_camera_info()
                # Calculate the spawn location in front of the camera
                spawn_distance = 500.0  # How far in front of the camera to spawn the asset
                print(f"forward vector{camera_rotation}")
                spawn_location = camera_location + (camera_rotation.get_forward_vector() * spawn_distance)
                spawn_location.z = 0.0  # Set the Z coordinate to ground level
                actor.set_actor_location(spawn_location, False, True)
            # Early return: only the FIRST matching mesh is used.
            return actor
    return None
def main(sentence):
    """End-to-end pipeline: parse the sentence into a scene graph, match each
    entity to a knowledge-graph asset, normalize the spatial relations, and
    spawn the corresponding actors in the level.

    Args:
        sentence: natural-language scene description, e.g.
            'Cow standing on sidewalk in city area near shops .'
    """
    KG_assets_only_info = get_KG_assets()
    # Truncate the spawn-list file before this run (open-for-write + close).
    open("/project/.txt", "w").close()
    res_dict, parserOutput = scenegraphTable(sentence)
    res_dict = scene_graph_list(res_dict)[1]
    subject_all_objects_dict = subjectAllObjectsDict(parserOutput)
    print("subject_all_objects_dict = ", subject_all_objects_dict)
    entity_asset_dict, spawning_dict = match_KG_input_cos(
        KG_assets_only_info, res_dict, subject_all_objects_dict)
    print("Entity Asset Dictionar/project/", entity_asset_dict)
    final_spawning_dict = dict()
    for k, v in list(spawning_dict.items()):
        final_spawning_dict[k] = []
        # BUG FIX (style): the loop variable was named `tuple`, shadowing the
        # builtin; renamed to `pair`. NOTE(review): the emptiness test reads
        # v[0] for every element — as produced by write_all_prep_to_file a
        # list is either all-real pairs or the single ("", "") placeholder,
        # so this is equivalent; confirm if other producers exist.
        for pair in v:
            if len(v[0][0]) != 0 and len(v[0][1]) != 0:
                # Normalize the preposition to the closest supported one.
                final_spawning_dict[k].append(
                    (sub_closest_prep(pair[0]), pair[1]))
            else:
                final_spawning_dict[k].append(("", ""))
    print("Final Spawning Dict Format: \n", final_spawning_dict)
    spawn_assets(final_spawning_dict)
#if __name__ == '__main__':
#main(sentence)
# === boundary between concatenated scripts (was a stray '|', a syntax error) ===
import unreal

# Datasmith VRED import example script.
# BUG FIX: the original used Python 2 `print` statements, which are syntax
# errors under Python 3 (the interpreter the rest of this file targets, given
# its f-strings); all prints are converted to function calls.

file_a = "/project/.fbx"
file_b = "/project/.fbx"
imported_scenes_path = "/project/"

print('Preparing import options...')
advanced_mesh_options = unreal.DatasmithStaticMeshImportOptions()
advanced_mesh_options.set_editor_property('max_lightmap_resolution', unreal.DatasmithImportLightmapMax.LIGHTMAP_512)
advanced_mesh_options.set_editor_property('min_lightmap_resolution', unreal.DatasmithImportLightmapMin.LIGHTMAP_64)
advanced_mesh_options.set_editor_property('generate_lightmap_u_vs', True)
advanced_mesh_options.set_editor_property('remove_degenerates', True)

base_options = unreal.DatasmithImportBaseOptions()
base_options.set_editor_property('include_geometry', True)
base_options.set_editor_property('include_material', True)
base_options.set_editor_property('include_light', True)
base_options.set_editor_property('include_camera', True)
base_options.set_editor_property('include_animation', True)
base_options.set_editor_property('static_mesh_options', advanced_mesh_options)
base_options.set_editor_property('scene_handling', unreal.DatasmithImportScene.CURRENT_LEVEL)
base_options.set_editor_property('asset_options', [])  # Not used

vred_options = unreal.DatasmithVREDImportOptions()
vred_options.set_editor_property('merge_nodes', False)
vred_options.set_editor_property('optimize_duplicated_nodes', False)
vred_options.set_editor_property('import_var', True)
vred_options.set_editor_property('var_path', "")
vred_options.set_editor_property('import_light_info', True)
vred_options.set_editor_property('light_info_path', "")
vred_options.set_editor_property('import_clip_info', True)
vred_options.set_editor_property('clip_info_path', "")
vred_options.set_editor_property('textures_dir', "")
vred_options.set_editor_property('import_animations', True)
vred_options.set_editor_property('intermediate_serialization', unreal.DatasmithVREDIntermediateSerializationType.DISABLED)
vred_options.set_editor_property('colorize_materials', False)
vred_options.set_editor_property('generate_lightmap_u_vs', False)
# NOTE(review): 'import_animations' was already set to True above; this
# duplicate assignment is kept from the original but is redundant.
vred_options.set_editor_property('import_animations', True)

# Direct import to scene and assets:
print('Importing directly to scene...')
unreal.VREDLibrary.import_(file_a, imported_scenes_path, base_options, None, True)

# 2-stage import step 1:
print('Parsing to scene object...')
scene = unreal.DatasmithVREDSceneElement.construct_datasmith_scene_from_file(file_b, imported_scenes_path, base_options, vred_options)
print('Resulting datasmith scene: ' + str(scene))
print('\tProduct name: ' + str(scene.get_product_name()))
print('\tMesh actor count: ' + str(len(scene.get_all_mesh_actors())))
print('\tLight actor count: ' + str(len(scene.get_all_light_actors())))
print('\tCamera actor count: ' + str(len(scene.get_all_camera_actors())))
print('\tCustom actor count: ' + str(len(scene.get_all_custom_actors())))
print('\tMaterial count: ' + str(len(scene.get_all_materials())))
print('\tAnimNode count: ' + str(len(scene.get_all_anim_nodes())))
print('\tAnimClip count: ' + str(len(scene.get_all_anim_clips())))
print('\tExtra light info count: ' + str(len(scene.get_all_extra_lights_info())))
print('\tVariant count: ' + str(len(scene.get_all_variants())))

# Modify one of the AnimNodes
# Warning: The AnimNode nested structure is all USTRUCTs, which are value types, and the Array accessor returns
# a copy. Meaning something like anim_nodes[0].name = 'new_name' will set the name on the COPY of anim_nodes[0]
anim_nodes = scene.get_all_anim_nodes()
if len(anim_nodes) > 0:
    node_0 = anim_nodes[0]
    old_name = node_0.name
    print('Anim node old name: ' + old_name)
    node_0.name += '_MODIFIED'
    modified_name = node_0.name
    print('Anim node modified name: ' + modified_name)
    anim_nodes[0] = node_0
    scene.set_all_anim_nodes(anim_nodes)

    # Check modification
    new_anim_nodes = scene.get_all_anim_nodes()
    print('Anim node retrieved modified name: ' + new_anim_nodes[0].name)
    assert new_anim_nodes[0].name == modified_name, "Node modification didn't work!"

    # Restore to previous state
    node_0 = new_anim_nodes[0]
    node_0.name = old_name
    new_anim_nodes[0] = node_0
    scene.set_all_anim_nodes(new_anim_nodes)

# 2-stage import step 2:
print('Importing assets and actors...')
result = scene.import_scene()
print('Import results: ')
print('\tImported actor count: ' + str(len(result.imported_actors)))
print('\tImported mesh count: ' + str(len(result.imported_meshes)))
print('\tImported level sequences: ' + str([a.get_name() for a in result.animations]))
print('\tImported level variant sets asset: ' + str(result.level_variant_sets.get_name()))
if result.import_succeed:
    print('Import succeeded!')
else:
    print('Import failed!')
# === boundary between concatenated scripts (was a stray '|', a syntax error) ===
import unreal

# Spawn an unbound PostProcessVolume and tune its exposure/bloom settings.

# Get the current editor world
editor_world = unreal.EditorLevelLibrary.get_editor_world()

# Define the location and rotation for the PostProcessVolume
location = unreal.Vector(0, 0, 0)
rotation = unreal.Rotator(0, 0, 0)

# Spawn the PostProcessVolume
post_process_volume = unreal.EditorLevelLibrary.spawn_actor_from_class(unreal.PostProcessVolume.static_class(), location, rotation)
post_process_volume.unbound = True  # Affect whole scene

# Disable autoexposure for more control
# NOTE: This just sets the values, you still need to activate these properties in the editor
# TODO: Figure out if this can be done from Python
# BUG FIX: the original line read `settings = post_process_volume.` — an
# incomplete attribute access and therefore a syntax error; the intended
# attribute is `.settings` (the volume's PostProcessSettings struct).
settings = post_process_volume.settings
settings.bloom_intensity = 100.0
settings.auto_exposure_method = unreal.AutoExposureMethod.AEM_MANUAL
settings.auto_exposure_apply_physical_camera_exposure = False
# === boundary between concatenated scripts (was a stray '|', a syntax error) ===
import unreal
import os.path
import json
class bone_limits_struct():
    """Per-bone rotation limits (degrees) around X/Y/Z, with helpers to derive
    a preferred bend angle and an up vector for rig setup.

    Attributes default to 0.0 and are assigned directly by get_bone_limits.
    """
    # Min/max rotation limit per axis, in degrees.
    bone_limit_x_min = 0.0
    bone_limit_x_max = 0.0
    bone_limit_y_min = 0.0
    bone_limit_y_max = 0.0
    bone_limit_z_min = 0.0
    bone_limit_z_max = 0.0

    def get_x_range(self):
        """Total span of the X limit interval."""
        return abs(self.bone_limit_x_max - self.bone_limit_x_min)

    def get_y_range(self):
        """Total span of the Y limit interval."""
        return abs(self.bone_limit_y_max - self.bone_limit_y_min)

    def get_z_range(self):
        """Total span of the Z limit interval."""
        return abs(self.bone_limit_z_max - self.bone_limit_z_min)

    # For getting the preferred angle, it seems like we want the largest angle, not the biggest range
    def get_x_max_angle(self):
        """Largest absolute X limit."""
        return max(abs(self.bone_limit_x_max), abs(self.bone_limit_x_min))

    def get_y_max_angle(self):
        """Largest absolute Y limit."""
        return max(abs(self.bone_limit_y_max), abs(self.bone_limit_y_min))

    def get_z_max_angle(self):
        """Largest absolute Z limit."""
        return max(abs(self.bone_limit_z_max), abs(self.bone_limit_z_min))

    def get_preferred_angle(self):
        """Return (x, y, z) where only the axis with the strictly largest
        absolute limit carries its extreme value; the others are 0.

        BUG FIX: when two or more axes tied for the largest angle, every
        strict '>' comparison failed and the original returned None, which
        crashed get_up_vector's tuple unpacking. A tie now falls back to the
        X-axis extreme.
        """
        if(self.get_x_max_angle() > self.get_y_max_angle() and self.get_x_max_angle() > self.get_z_max_angle()):
            if abs(self.bone_limit_x_min) > abs(self.bone_limit_x_max): return self.bone_limit_x_min, 0.0, 0.0
            return self.bone_limit_x_max, 0.0, 0.0
        if(self.get_y_max_angle() > self.get_x_max_angle() and self.get_y_max_angle() > self.get_z_max_angle()):
            if abs(self.bone_limit_y_min) > abs(self.bone_limit_y_max): return 0.0, self.bone_limit_y_min, 0.0
            return 0.0, self.bone_limit_y_max, 0.0
        if(self.get_z_max_angle() > self.get_x_max_angle() and self.get_z_max_angle() > self.get_y_max_angle()):
            if abs(self.bone_limit_z_min) > abs(self.bone_limit_z_max): return 0.0, 0.0, self.bone_limit_z_min
            return 0.0, 0.0, self.bone_limit_z_max
        # Tie fallback: prefer the X axis extreme with the larger magnitude.
        if abs(self.bone_limit_x_min) > abs(self.bone_limit_x_max):
            return self.bone_limit_x_min, 0.0, 0.0
        return self.bone_limit_x_max, 0.0, 0.0

    def get_up_vector(self):
        """Derive an up vector perpendicular-ish to the preferred bend axis.

        NOTE(review): Vector.normalize() mutates in place and (-z, y, -x) is
        not orthogonal to (x, y, z) in general — assumed intentional for the
        rig's convention; confirm against the control-rig setup.
        """
        x, y, z = self.get_preferred_angle()
        primary_axis = unreal.Vector(x, y, z)
        primary_axis.normalize()
        up_axis = unreal.Vector(-1.0 * z, y, -1.0 * x)
        return up_axis.normal()
def get_bone_limits(dtu_json, skeletal_mesh_force_front_x):
    """Build {bone_name: bone_limits_struct} from the DTU 'LimitData' table.

    Each LimitData entry is [name, _, xmin, xmax, ymin, ymax, zmin, zmax].
    Y and Z limits are sign-flipped for the target coordinate convention;
    when the mesh was imported facing right (force front X), the source axes
    are remapped (source x/y -> y, z -> x) instead.
    """
    result = {}
    for entry in dtu_json['LimitData'].values():
        data = bone_limits_struct()
        if skeletal_mesh_force_front_x:
            # Force-front-X remap: entries 2..5 feed Y/Z (negated), 6..7 feed X.
            data.bone_limit_y_min = -1.0 * entry[2]
            data.bone_limit_y_max = -1.0 * entry[3]
            data.bone_limit_z_min = -1.0 * entry[4]
            data.bone_limit_z_max = -1.0 * entry[5]
            data.bone_limit_x_min = entry[6]
            data.bone_limit_x_max = entry[7]
        else:
            data.bone_limit_x_min = entry[2]
            data.bone_limit_x_max = entry[3]
            data.bone_limit_y_min = -1.0 * entry[4]
            data.bone_limit_y_max = -1.0 * entry[5]
            data.bone_limit_z_min = -1.0 * entry[6]
            data.bone_limit_z_max = -1.0 * entry[7]
        result[entry[0]] = data
    return result
def get_bone_limits_from_skeletalmesh(skeletal_mesh):
    """Locate the .dtu sidecar next to the mesh's source FBX and load its bone
    limits.

    Args:
        skeletal_mesh: an unreal.SkeletalMesh with asset import data.

    Returns:
        {bone_name: bone_limits_struct} from get_bone_limits, or an empty
        dict when no .dtu file exists.
        BUG FIX: the failure path previously returned a list ([]) while the
        success path returns a dict; an empty dict keeps the type consistent
        (both remain falsy/iterable for existing callers).
    """
    asset_import_data = skeletal_mesh.get_editor_property('asset_import_data')
    fbx_path = asset_import_data.get_first_filename()
    # Replace the FBX extension with .dtu and drop the UpdatedFBX directory.
    dtu_file = fbx_path.rsplit('.', 1)[0] + '.dtu'
    dtu_file = dtu_file.replace('/UpdatedFBX/', '/')
    print(dtu_file)
    if os.path.exists(dtu_file):
        # BUG FIX: json.load(open(...)) leaked the file handle.
        with open(dtu_file) as f:
            dtu_data = json.load(f)
        force_front_x = asset_import_data.get_editor_property('force_front_x_axis')
        return get_bone_limits(dtu_data, force_front_x)
    return {}
def get_character_type(dtu_json):
    """Classify the DTU 'Asset Id' as 'Genesis8', 'Genesis9', or 'Unknown'
    (case-insensitive prefix match)."""
    asset_id = dtu_json['Asset Id'].lower()
    for generation in ('Genesis8', 'Genesis9'):
        if asset_id.startswith(generation.lower()):
            return generation
    return 'Unknown'
def set_control_shape(blueprint, bone_name, shape_type):
    """Apply display settings and a shape to the control for bone_name.

    shape_type: one of 'root', 'hip', 'iktarget', 'large_2d_bend' or
    'slider'. Any other value builds the settings but never applies them,
    leaving the control untouched.
    """
    hierarchy = blueprint.hierarchy
    control_name = bone_name + '_ctrl'
    # Base settings shared by every shape: animatable euler-transform
    # control, red, visible, with all nine transform-channel limits disabled.
    control_settings_root_ctrl = unreal.RigControlSettings()
    control_settings_root_ctrl.animation_type = unreal.RigControlAnimationType.ANIMATION_CONTROL
    control_settings_root_ctrl.control_type = unreal.RigControlType.EULER_TRANSFORM
    control_settings_root_ctrl.display_name = 'None'
    control_settings_root_ctrl.draw_limits = True
    control_settings_root_ctrl.shape_color = unreal.LinearColor(1.000000, 0.000000, 0.000000, 1.000000)
    control_settings_root_ctrl.shape_visible = True
    control_settings_root_ctrl.is_transient_control = False
    control_settings_root_ctrl.limit_enabled = [unreal.RigControlLimitEnabled(False, False), unreal.RigControlLimitEnabled(False, False), unreal.RigControlLimitEnabled(False, False), unreal.RigControlLimitEnabled(False, False), unreal.RigControlLimitEnabled(False, False), unreal.RigControlLimitEnabled(False, False), unreal.RigControlLimitEnabled(False, False), unreal.RigControlLimitEnabled(False, False), unreal.RigControlLimitEnabled(False, False)]
    control_settings_root_ctrl.minimum_value = unreal.RigHierarchy.make_control_value_from_euler_transform(unreal.EulerTransform(location=[0.000000,0.000000,0.000000],rotation=[0.000000,0.000000,0.000000],scale=[0.000000,0.000000,0.000000]))
    control_settings_root_ctrl.maximum_value = unreal.RigHierarchy.make_control_value_from_euler_transform(unreal.EulerTransform(location=[0.000000,0.000000,0.000000],rotation=[0.000000,0.000000,0.000000],scale=[0.000000,0.000000,0.000000]))
    control_settings_root_ctrl.primary_axis = unreal.RigControlAxis.X
    if shape_type == "root":
        # Root control: thick square, scaled 10x.
        control_settings_root_ctrl.shape_name = 'Square_Thick'
        hierarchy.set_control_settings(unreal.RigElementKey(type=unreal.RigElementType.CONTROL, name=control_name), control_settings_root_ctrl)
        hierarchy.set_control_shape_transform(unreal.RigElementKey(type=unreal.RigElementType.CONTROL, name=control_name), unreal.Transform(location=[0.000000,0.000000,0.000000],rotation=[0.000000,0.000000,0.000000],scale=[10.000000,10.000000,10.000000]), True)
    if shape_type == "hip":
        # Hip control: thick hexagon, scaled 10x.
        control_settings_root_ctrl.shape_name = 'Hexagon_Thick'
        hierarchy.set_control_settings(unreal.RigElementKey(type=unreal.RigElementType.CONTROL, name=control_name), control_settings_root_ctrl)
        hierarchy.set_control_shape_transform(unreal.RigElementKey(type=unreal.RigElementType.CONTROL, name=control_name), unreal.Transform(location=[0.000000,0.000000,0.000000],rotation=[0.000000,0.000000,0.000000],scale=[10.000000,10.000000,10.000000]), True)
    if shape_type == "iktarget":
        # IK target: thin box at default size (shape transform intentionally not set).
        control_settings_root_ctrl.shape_name = 'Box_Thin'
        hierarchy.set_control_settings(unreal.RigElementKey(type=unreal.RigElementType.CONTROL, name=control_name), control_settings_root_ctrl)
        #hierarchy.set_control_shape_transform(unreal.RigElementKey(type=unreal.RigElementType.CONTROL, name=control_name), unreal.Transform(location=[0.000000,0.000000,0.000000],rotation=[0.000000,0.000000,0.000000],scale=[10.000000,10.000000,10.000000]), True)
    if shape_type == "large_2d_bend":
        # 2D-bend control: thick 4-way arrow, scaled 8x.
        control_settings_root_ctrl.shape_name = 'Arrow4_Thick'
        hierarchy.set_control_settings(unreal.RigElementKey(type=unreal.RigElementType.CONTROL, name=control_name), control_settings_root_ctrl)
        hierarchy.set_control_shape_transform(unreal.RigElementKey(type=unreal.RigElementType.CONTROL, name=control_name), unreal.Transform(location=[0.000000,0.000000,0.000000],rotation=[0.000000,0.000000,0.000000],scale=[8.000000,8.000000,8.000000]), True)
    if shape_type == "slider":
        # Slider: switch to a float control limited to [0, 1] along Y,
        # drawn as a small thin arrow.
        control_settings_root_ctrl.control_type = unreal.RigControlType.FLOAT
        control_settings_root_ctrl.primary_axis = unreal.RigControlAxis.Y
        control_settings_root_ctrl.limit_enabled = [unreal.RigControlLimitEnabled(True, True)]
        control_settings_root_ctrl.minimum_value = unreal.RigHierarchy.make_control_value_from_float(0.000000)
        control_settings_root_ctrl.maximum_value = unreal.RigHierarchy.make_control_value_from_float(1.000000)
        control_settings_root_ctrl.shape_name = 'Arrow2_Thin'
        hierarchy.set_control_settings(unreal.RigElementKey(type=unreal.RigElementType.CONTROL, name=control_name), control_settings_root_ctrl)
        hierarchy.set_control_shape_transform(unreal.RigElementKey(type=unreal.RigElementType.CONTROL, name=control_name), unreal.Transform(location=[0.000000,0.000000,0.000000],rotation=[0.000000,0.000000,0.000000],scale=[0.5,0.5,0.5]), True)
# Node whose ExecuteContext pin the next construction-event node should chain
# from; starts at the PrepareForExecution entry node.
last_construction_link = 'PrepareForExecution'
# Vertical graph-layout position for the next construction node.
construction_y_pos = 200
def create_construction(blueprint, bone_name):
    """Add construction-event nodes snapping <bone_name>_ctrl to its bone.

    Creates a GetTransform node reading the bone's initial global transform
    and a SetTransform node writing it onto the matching control, links the
    two, and chains the SetTransform onto the running construction-event
    execution chain (tracked via the module globals above).
    """
    global last_construction_link
    global construction_y_pos
    rig_controller = blueprint.get_controller_by_name('RigVMModel')
    if rig_controller is None:
        # Older engine versions only expose the default controller.
        rig_controller = blueprint.get_controller()
    control_name = bone_name + '_ctrl'
    get_bone_transform_node_name = "RigUnit_Construction_GetTransform_" + bone_name
    set_control_transform_node_name = "RigtUnit_Construction_SetTransform_" + control_name
    rig_controller.add_unit_node_from_struct_path('/project/.RigUnit_GetTransform', 'Execute', unreal.Vector2D(1000.0, construction_y_pos), get_bone_transform_node_name)
    rig_controller.set_pin_default_value(get_bone_transform_node_name + '.Item', '(Type=Bone)')
    rig_controller.set_pin_expansion(get_bone_transform_node_name + '.Item', True)
    rig_controller.set_pin_default_value(get_bone_transform_node_name + '.Space', 'GlobalSpace')
    rig_controller.set_pin_default_value(get_bone_transform_node_name + '.bInitial', 'True')
    rig_controller.set_pin_default_value(get_bone_transform_node_name + '.Item.Name', bone_name, True)
    rig_controller.set_pin_default_value(get_bone_transform_node_name + '.Item.Type', 'Bone', True)
    rig_controller.set_node_selection([get_bone_transform_node_name])
    try:
        # Newer engines create Set Transform via a template node.
        rig_controller.add_template_node('Set Transform::Execute(in Item,in Space,in bInitial,in Value,in Weight,in bPropagateToChildren,io ExecuteContext)', unreal.Vector2D(1300.0, construction_y_pos), set_control_transform_node_name)
    except Exception:
        # Fall back to the unit-node API on engines without template nodes.
        set_transform_scriptstruct = get_scriptstruct_by_node_name("SetTransform")
        rig_controller.add_unit_node(set_transform_scriptstruct, 'Execute', unreal.Vector2D(526.732236, -608.972187), set_control_transform_node_name)
    rig_controller.set_pin_default_value(set_control_transform_node_name + '.Item', '(Type=Bone,Name="None")')
    rig_controller.set_pin_expansion(set_control_transform_node_name + '.Item', False)
    rig_controller.set_pin_default_value(set_control_transform_node_name + '.Space', 'GlobalSpace')
    rig_controller.set_pin_default_value(set_control_transform_node_name + '.bInitial', 'True')
    rig_controller.set_pin_default_value(set_control_transform_node_name + '.Weight', '1.000000')
    rig_controller.set_pin_default_value(set_control_transform_node_name + '.bPropagateToChildren', 'True')
    rig_controller.set_pin_default_value(set_control_transform_node_name + '.Item.Name', control_name, True)
    rig_controller.set_pin_default_value(set_control_transform_node_name + '.Item.Type', 'Control', True)
    try:
        rig_controller.add_link(get_bone_transform_node_name + '.Transform', set_control_transform_node_name + '.Value')
    except Exception:
        # Older engines name the SetTransform input pin 'Transform' not 'Value'.
        try:
            rig_controller.add_link(get_bone_transform_node_name + '.Transform', set_control_transform_node_name + '.Transform')
        except Exception as e:
            print("ERROR: create_construction: rig_controller.add_link(): " + str(e))
    rig_controller.add_link(last_construction_link + '.ExecuteContext', set_control_transform_node_name + '.ExecuteContext')
    last_construction_link = set_control_transform_node_name
    construction_y_pos = construction_y_pos + 250
# Pin path the next backward-solver node should chain its ExecuteContext from.
last_backward_solver_link = 'InverseExecution.ExecuteContext'
def create_backward_solver(blueprint, bone_name):
    """Add backward-solver nodes copying the bone pose onto <bone_name>_ctrl.

    Reads the bone's current global transform and writes it to the matching
    control, chained onto the backward-solve (InverseExecution) event so
    animation can be baked back onto the controls.
    """
    global last_backward_solver_link
    control_name = bone_name + '_ctrl'
    get_bone_transform_node_name = "RigUnit_BackwardSolver_GetTransform_" + bone_name
    set_control_transform_node_name = "RigtUnit_BackwardSolver_SetTransform_" + control_name
    rig_controller = blueprint.get_controller_by_name('RigVMModel')
    if rig_controller is None:
        # Older engine versions only expose the default controller.
        rig_controller = blueprint.get_controller()
    rig_controller.add_unit_node_from_struct_path('/project/.RigUnit_GetTransform', 'Execute', unreal.Vector2D(-636.574629, -1370.167943), get_bone_transform_node_name)
    rig_controller.set_pin_default_value(get_bone_transform_node_name + '.Item', '(Type=Bone)')
    rig_controller.set_pin_expansion(get_bone_transform_node_name + '.Item', True)
    rig_controller.set_pin_default_value(get_bone_transform_node_name + '.Space', 'GlobalSpace')
    rig_controller.set_pin_default_value(get_bone_transform_node_name + '.Item.Name', bone_name, True)
    rig_controller.set_pin_default_value(get_bone_transform_node_name + '.Item.Type', 'Bone', True)
    try:
        # Newer engines create Set Transform via a template node.
        rig_controller.add_template_node('Set Transform::Execute(in Item,in Space,in bInitial,in Value,in Weight,in bPropagateToChildren,io ExecuteContext)', unreal.Vector2D(-190.574629, -1378.167943), set_control_transform_node_name)
    except Exception:
        # Fall back to the unit-node API on engines without template nodes.
        set_transform_scriptstruct = get_scriptstruct_by_node_name("SetTransform")
        rig_controller.add_unit_node(set_transform_scriptstruct, 'Execute', unreal.Vector2D(-190.574629, -1378.167943), set_control_transform_node_name)
    rig_controller.set_pin_default_value(set_control_transform_node_name + '.Item', '(Type=Bone,Name="None")')
    rig_controller.set_pin_expansion(set_control_transform_node_name + '.Item', False)
    rig_controller.set_pin_default_value(set_control_transform_node_name + '.Space', 'GlobalSpace')
    rig_controller.set_pin_default_value(set_control_transform_node_name + '.bInitial', 'False')
    rig_controller.set_pin_default_value(set_control_transform_node_name + '.Weight', '1.000000')
    rig_controller.set_pin_default_value(set_control_transform_node_name + '.bPropagateToChildren', 'True')
    rig_controller.set_pin_default_value(set_control_transform_node_name + '.Item.Name', control_name, True)
    rig_controller.set_pin_default_value(set_control_transform_node_name + '.Item.Type', 'Control', True)
    try:
        rig_controller.add_link(get_bone_transform_node_name + '.Transform', set_control_transform_node_name + '.Value')
    except Exception:
        # Older engines name the SetTransform input pin 'Transform' not 'Value'.
        try:
            rig_controller.add_link(get_bone_transform_node_name + '.Transform', set_control_transform_node_name + '.Transform')
        except Exception as e:
            print("ERROR: create_backward_solver: rig_controller.add_link(): " + str(e))
    rig_controller.add_link(last_backward_solver_link, set_control_transform_node_name + '.ExecuteContext')
    last_backward_solver_link = set_control_transform_node_name + '.ExecuteContext'
def parent_control_to_control(hierarchy_controller, parent_control_name, control_name):
    """Re-parent one rig control under another (without maintaining pose)."""
    child_key = unreal.RigElementKey(type=unreal.RigElementType.CONTROL, name=control_name)
    parent_key = unreal.RigElementKey(type=unreal.RigElementType.CONTROL, name=parent_control_name)
    hierarchy_controller.set_parent(child_key, parent_key, False)
def parent_control_to_bone(hierarchy_controller, parent_bone_name, control_name):
    """Re-parent a rig control under a bone (without maintaining pose)."""
    child_key = unreal.RigElementKey(type=unreal.RigElementType.CONTROL, name=control_name)
    parent_key = unreal.RigElementKey(type=unreal.RigElementType.BONE, name=parent_bone_name)
    hierarchy_controller.set_parent(child_key, parent_key, False)
# Pin path the next forward-solve node chains its ExecuteContext from.
next_forward_execute = 'BeginExecution.ExecuteContext'
# Vertical graph-layout position for the next forward-solve node.
node_y_pos = 200.0
def create_control(blueprint, bone_name, parent_bone_name, shape_type):
    """Create <bone_name>_ctrl and wire it into the rig graphs.

    Adds (or re-adds) a control at the bone's global transform, builds the
    construction- and backward-solver nodes for it, and applies the
    requested shape. For 'iktarget', 'large_2d_bend' and 'slider' shapes
    the control is NOT linked into the forward solve here - the caller
    wires those up itself.

    Returns the control's name.
    """
    global next_forward_execute
    global node_y_pos
    hierarchy = blueprint.hierarchy
    hierarchy_controller = hierarchy.get_controller()
    control_name = bone_name + '_ctrl'
    default_setting = unreal.RigControlSettings()
    default_setting.shape_name = 'Box_Thin'
    default_setting.control_type = unreal.RigControlType.EULER_TRANSFORM
    default_value = hierarchy.make_control_value_from_euler_transform(
        unreal.EulerTransform(scale=[1, 1, 1]))
    key = unreal.RigElementKey(type=unreal.RigElementType.BONE, name=bone_name)
    # Remove any stale control from a previous run before re-creating it.
    hierarchy_controller.remove_element(unreal.RigElementKey(type=unreal.RigElementType.CONTROL, name=control_name))
    rig_control_element = hierarchy.find_control(unreal.RigElementKey(type=unreal.RigElementType.CONTROL, name=control_name))
    control_key = rig_control_element.get_editor_property('key')
    if control_key.get_editor_property('name') != "":
        # Control still exists (removal did not take); reuse its key.
        control_key = rig_control_element.get_editor_property('key')
    else:
        try:
            control_key = hierarchy_controller.add_control(control_name, unreal.RigElementKey(), default_setting, default_value, True, True)
        except Exception:
            # Older add_control signature takes one fewer bool.
            control_key = hierarchy_controller.add_control(control_name, unreal.RigElementKey(), default_setting, default_value, True)
    # Place the control's offset at the bone's initial/current transform.
    transform = hierarchy.get_global_transform(key, True)
    hierarchy.set_control_offset_transform(control_key, transform, True)
    hierarchy.set_control_offset_transform(control_key, transform, False)
    create_construction(blueprint, bone_name)
    create_backward_solver(blueprint, bone_name)
    set_control_shape(blueprint, bone_name, shape_type)
    if shape_type in ['iktarget', 'large_2d_bend', 'slider']:
        return control_name
    # Forward solve: drive the bone from the control every frame.
    # Hoisted: the controller was previously looked up once per call below.
    rig_controller = blueprint.get_controller_by_name('RigVMModel')
    get_transform_node_name = bone_name + "_GetTransform"
    rig_controller.add_unit_node_from_struct_path('/project/.RigUnit_GetTransform', 'Execute', unreal.Vector2D(49.941063, node_y_pos), get_transform_node_name)
    rig_controller.set_pin_default_value(get_transform_node_name + '.Item', '(Type=Bone,Name="None")')
    rig_controller.set_pin_expansion(get_transform_node_name + '.Item', True)
    rig_controller.set_pin_default_value(get_transform_node_name + '.Space', 'GlobalSpace')
    rig_controller.set_pin_default_value(get_transform_node_name + '.bInitial', 'False')
    rig_controller.set_pin_default_value(get_transform_node_name + '.Item.Name', control_name, True)
    rig_controller.set_pin_default_value(get_transform_node_name + '.Item.Type', 'Control', True)
    set_transform_node_name = bone_name + "_SetTransform"
    rig_controller.add_template_node('Set Transform::Execute(in Item,in Space,in bInitial,in Value,in Weight,in bPropagateToChildren)', unreal.Vector2D(701.941063, node_y_pos), set_transform_node_name)
    rig_controller.set_pin_default_value(set_transform_node_name + '.Item', '(Type=Bone,Name="None")')
    rig_controller.set_pin_expansion(set_transform_node_name + '.Item', False)
    rig_controller.set_pin_default_value(set_transform_node_name + '.Space', 'GlobalSpace')
    rig_controller.set_pin_default_value(set_transform_node_name + '.bInitial', 'False')
    rig_controller.set_pin_default_value(set_transform_node_name + '.Weight', '1.000000')
    rig_controller.set_pin_default_value(set_transform_node_name + '.bPropagateToChildren', 'True')
    rig_controller.set_pin_default_value(set_transform_node_name + '.Item.Name', bone_name, True)
    rig_controller.set_pin_default_value(set_transform_node_name + '.Item.Type', 'Bone', True)
    rig_controller.add_link(get_transform_node_name + '.Transform', set_transform_node_name + '.Value')
    rig_controller.add_link(next_forward_execute, set_transform_node_name + '.ExecuteContext')
    next_forward_execute = set_transform_node_name + '.ExecuteContext'
    if parent_bone_name:
        # NOTE(review): the re-parenting call is currently disabled in the
        # original; this name is computed but unused - confirm intent.
        parent_control_name = parent_bone_name + '_ctrl'
    node_y_pos = node_y_pos + 200
    return control_name
def create_limb_ik(blueprint, skeleton, bone_limits, root_bone_name, end_bone_name, shape_type):
    """Add a PBIK (full-body IK) node driving root->end from <end>_ctrl.

    bone_limits: dict of bone_limits_struct keyed by bone name; used to set
        the preferred angles of the limb's mid (joint) bone.
    shape_type: unused here, kept for signature parity with the other
        create_* helpers.
    """
    global next_forward_execute
    global node_y_pos
    end_bone_ctrl = end_bone_name + '_ctrl'
    rig_controller = blueprint.get_controller_by_name('RigVMModel')
    limb_ik_node_name = end_bone_name + '_FBIK'
    rig_controller.add_unit_node_from_struct_path('/project/.RigUnit_PBIK', 'Execute', unreal.Vector2D(370.599976, node_y_pos), limb_ik_node_name)
    rig_controller.set_pin_default_value(limb_ik_node_name + '.Settings', '(Iterations=20,MassMultiplier=1.000000,MinMassMultiplier=0.200000,bStartSolveFromInputPose=True)')
    rig_controller.set_pin_default_value(limb_ik_node_name + '.Root', root_bone_name, False)
    # Stiffen the pelvis so the solver doesn't drag the whole body around.
    root_FBIK_settings = rig_controller.insert_array_pin(limb_ik_node_name + '.BoneSettings', -1, '')
    rig_controller.set_pin_default_value(root_FBIK_settings + '.Bone', 'pelvis', False)
    rig_controller.set_pin_default_value(root_FBIK_settings + '.RotationStiffness', '1.0', False)
    rig_controller.set_pin_default_value(root_FBIK_settings + '.PositionStiffness', '1.0', False)
    rig_controller.set_pin_default_value(limb_ik_node_name + '.Settings.RootBehavior', 'PinToInput', False)
    # Effector transform comes from the end bone's control each frame.
    get_transform_node_name = end_bone_name + "_GetTransform"
    rig_controller.add_unit_node_from_struct_path('/project/.RigUnit_GetTransform', 'Execute', unreal.Vector2D(49.941063, node_y_pos), get_transform_node_name)
    rig_controller.set_pin_default_value(get_transform_node_name + '.Item', '(Type=Bone,Name="None")')
    rig_controller.set_pin_expansion(get_transform_node_name + '.Item', True)
    rig_controller.set_pin_default_value(get_transform_node_name + '.Space', 'GlobalSpace')
    rig_controller.set_pin_default_value(get_transform_node_name + '.bInitial', 'False')
    rig_controller.set_pin_default_value(get_transform_node_name + '.Item.Name', end_bone_ctrl, True)
    rig_controller.set_pin_default_value(get_transform_node_name + '.Item.Type', 'Control', True)
    pin_name = rig_controller.insert_array_pin(limb_ik_node_name + '.Effectors', -1, '')
    rig_controller.add_link(get_transform_node_name + '.Transform', pin_name + '.Transform')
    rig_controller.set_pin_default_value(pin_name + '.Bone', end_bone_name, False)
    # Configure preferred angles on the limb's mid (joint) bone so the
    # solver bends it the natural way.
    joint_bone_name = unreal.DazToUnrealBlueprintUtils.get_joint_bone(skeleton, root_bone_name, end_bone_name)
    bone_settings_name = rig_controller.insert_array_pin(limb_ik_node_name + '.BoneSettings', -1, '')
    rig_controller.set_pin_default_value(bone_settings_name + '.Bone', joint_bone_name, False)
    x_preferred, y_preferred, z_preferred = bone_limits[str(joint_bone_name)].get_preferred_angle()
    rig_controller.set_pin_default_value(bone_settings_name + '.bUsePreferredAngles', 'true', False)
    rig_controller.set_pin_default_value(bone_settings_name + '.PreferredAngles.X', str(x_preferred), False)
    rig_controller.set_pin_default_value(bone_settings_name + '.PreferredAngles.Y', str(y_preferred), False)
    rig_controller.set_pin_default_value(bone_settings_name + '.PreferredAngles.Z', str(z_preferred), False)
    rig_controller.add_link(next_forward_execute, limb_ik_node_name + '.ExecuteContext')
    node_y_pos = node_y_pos + 400
    next_forward_execute = limb_ik_node_name + '.ExecuteContext'
def create_2d_bend(blueprint, skeleton, bone_limits, start_bone_name, end_bone_name, shape_type):
    """Drive a bone chain bend (start..end) from one rotation control.

    Creates a 'large_2d_bend' control on the start bone and distributes its
    global rotation across the start..end chain with a DistributeRotation
    node. skeleton/bone_limits/shape_type are unused here, kept for
    signature parity with the other create_* helpers.
    """
    global node_y_pos
    global next_forward_execute
    # Hoisted: the controller was previously looked up once per call below.
    rig_controller = blueprint.get_controller_by_name('RigVMModel')
    ctrl_name = create_control(blueprint, start_bone_name, None, 'large_2d_bend')
    distribute_node_name = start_bone_name + "_to_" + end_bone_name + "_DistributeRotation"
    rig_controller.add_unit_node_from_struct_path('/project/.RigUnit_DistributeRotationForItemArray', 'Execute', unreal.Vector2D(365.055382, node_y_pos), distribute_node_name)
    rig_controller.set_pin_default_value(distribute_node_name + '.RotationEaseType', 'Linear')
    rig_controller.set_pin_default_value(distribute_node_name + '.Weight', '0.25')
    # Bone chain from start to end feeds the distribute node's Items.
    item_collection_node_name = start_bone_name + "_to_" + end_bone_name + "_ItemCollection"
    rig_controller.add_unit_node_from_struct_path('/project/.RigUnit_CollectionChainArray', 'Execute', unreal.Vector2D(120.870192, node_y_pos), item_collection_node_name)
    rig_controller.set_pin_default_value(item_collection_node_name + '.FirstItem', '(Type=Bone,Name="None")')
    rig_controller.set_pin_expansion(item_collection_node_name + '.FirstItem', True)
    rig_controller.set_pin_default_value(item_collection_node_name + '.LastItem', '(Type=Bone,Name="None")')
    rig_controller.set_pin_expansion(item_collection_node_name + '.LastItem', True)
    rig_controller.set_pin_default_value(item_collection_node_name + '.Reverse', 'False')
    rig_controller.set_pin_default_value(item_collection_node_name + '.FirstItem.Name', start_bone_name, False)
    rig_controller.set_pin_default_value(item_collection_node_name + '.LastItem.Name', end_bone_name, False)
    rig_controller.add_link(item_collection_node_name + '.Items', distribute_node_name + '.Items')
    # The control's global rotation becomes the distributed rotation.
    get_transform_node_name = ctrl_name + "_2dbend_GetTransform"
    rig_controller.add_unit_node_from_struct_path('/project/.RigUnit_GetTransform', 'Execute', unreal.Vector2D(49.941063, node_y_pos), get_transform_node_name)
    rig_controller.set_pin_default_value(get_transform_node_name + '.Item', '(Type=Bone,Name="None")')
    rig_controller.set_pin_expansion(get_transform_node_name + '.Item', True)
    rig_controller.set_pin_default_value(get_transform_node_name + '.Space', 'GlobalSpace')
    rig_controller.set_pin_default_value(get_transform_node_name + '.bInitial', 'False')
    rig_controller.set_pin_default_value(get_transform_node_name + '.Item.Name', ctrl_name, True)
    rig_controller.set_pin_default_value(get_transform_node_name + '.Item.Type', 'Control', True)
    rotation_pin = rig_controller.insert_array_pin(distribute_node_name + '.Rotations', -1, '')
    rig_controller.add_link(get_transform_node_name + '.Transform.Rotation', rotation_pin + '.Rotation')
    rig_controller.add_link(next_forward_execute, distribute_node_name + '.ExecuteContext')
    node_y_pos = node_y_pos + 350
    next_forward_execute = distribute_node_name + '.ExecuteContext'
def create_slider_bend(blueprint, skeleton, bone_limits, start_bone_name, end_bone_name, parent_control_name):
    """Bend a bone chain toward its preferred angles via a 0..1 slider.

    A float 'slider' control on the start bone scales the Weight of a
    DistributeRotation node whose rotation is built from the start bone's
    preferred angles (from bone_limits). The slider control is parented
    under parent_control_name. skeleton is unused here, kept for signature
    parity with the other create_* helpers.
    """
    global node_y_pos
    global next_forward_execute
    hierarchy = blueprint.hierarchy
    hierarchy_controller = hierarchy.get_controller()
    # Hoisted: the controller was previously looked up once per call below.
    rig_controller = blueprint.get_controller_by_name('RigVMModel')
    ctrl_name = create_control(blueprint, start_bone_name, None, 'slider')
    distribute_node_name = start_bone_name + "_to_" + end_bone_name + "_DistributeRotation"
    rig_controller.add_unit_node_from_struct_path('/project/.RigUnit_DistributeRotationForItemArray', 'Execute', unreal.Vector2D(800.0, node_y_pos), distribute_node_name)
    rig_controller.set_pin_default_value(distribute_node_name + '.RotationEaseType', 'Linear')
    rig_controller.set_pin_default_value(distribute_node_name + '.Weight', '0.25')
    rotation_pin = rig_controller.insert_array_pin(distribute_node_name + '.Rotations', -1, '')
    # Bone chain from start to end feeds the distribute node's Items.
    item_collection_node_name = start_bone_name + "_to_" + end_bone_name + "_ItemCollection"
    rig_controller.add_unit_node_from_struct_path('/project/.RigUnit_CollectionChainArray', 'Execute', unreal.Vector2D(120.0, node_y_pos), item_collection_node_name)
    rig_controller.set_pin_default_value(item_collection_node_name + '.FirstItem', '(Type=Bone,Name="None")')
    rig_controller.set_pin_expansion(item_collection_node_name + '.FirstItem', True)
    rig_controller.set_pin_default_value(item_collection_node_name + '.LastItem', '(Type=Bone,Name="None")')
    rig_controller.set_pin_expansion(item_collection_node_name + '.LastItem', True)
    rig_controller.set_pin_default_value(item_collection_node_name + '.Reverse', 'False')
    rig_controller.set_pin_default_value(item_collection_node_name + '.FirstItem.Name', start_bone_name, False)
    rig_controller.set_pin_default_value(item_collection_node_name + '.LastItem.Name', end_bone_name, False)
    rig_controller.add_link(item_collection_node_name + '.Items', distribute_node_name + '.Items')
    # Constant rotation built from the start bone's preferred angles.
    # NOTE(review): only X is negated here - looks intentional for the axis
    # convention, but confirm against the DTU export.
    rotation_node_name = start_bone_name + "_to_" + end_bone_name + "_Rotation"
    x_preferred, y_preferred, z_preferred = bone_limits[start_bone_name].get_preferred_angle()
    rig_controller.add_unit_node_from_struct_path('/project/.RigVMFunction_MathQuaternionFromEuler', 'Execute', unreal.Vector2D(350.0, node_y_pos), rotation_node_name)
    rig_controller.set_pin_default_value(rotation_node_name + '.Euler', '(X=0.000000,Y=0.000000,Z=0.000000)')
    rig_controller.set_pin_expansion(rotation_node_name + '.Euler', True)
    rig_controller.set_pin_default_value(rotation_node_name + '.RotationOrder', 'ZYX')
    rig_controller.set_pin_default_value(rotation_node_name + '.Euler.X', str(x_preferred * -1.0), False)
    rig_controller.set_pin_default_value(rotation_node_name + '.Euler.Y', str(y_preferred), False)
    rig_controller.set_pin_default_value(rotation_node_name + '.Euler.Z', str(z_preferred), False)
    # The slider's float value drives the distribute weight.
    control_float_node_name = start_bone_name + "_to_" + end_bone_name + "_ControlFloat"
    rig_controller.add_unit_node_from_struct_path('/project/.RigUnit_GetControlFloat', 'Execute', unreal.Vector2D(500.0, node_y_pos), control_float_node_name)
    rig_controller.set_pin_default_value(control_float_node_name + '.Control', 'None')
    rig_controller.set_pin_default_value(control_float_node_name + '.Control', ctrl_name, False)
    rig_controller.add_link(rotation_node_name + '.Result', rotation_pin + '.Rotation')
    rig_controller.add_link(next_forward_execute, distribute_node_name + '.ExecuteContext')
    rig_controller.add_link(control_float_node_name + '.FloatValue', distribute_node_name + '.Weight')
    parent_control_to_control(hierarchy_controller, parent_control_name, ctrl_name)
    node_y_pos = node_y_pos + 350
    next_forward_execute = distribute_node_name + '.ExecuteContext'
|
import unreal
import taichi as ti
from Utilities.Utils import Singleton
# Request the GPU backend; taichi may fall back to CPU if no GPU backend is
# available on this machine.
ti.init(arch=ti.gpu)
# Fractal canvas resolution: 2n wide by n tall, one float per pixel.
n = 120
pixels = ti.field(dtype=float, shape=(n * 2, n))
@ti.func
def complex_sqr(z):
    """Square a complex number represented as a 2-vector (re, im)."""
    real_part = z[0] * z[0] - z[1] * z[1]
    imag_part = 2 * z[0] * z[1]
    return ti.Vector([real_part, imag_part])
@ti.kernel
def paint(t: float):
    """Render one frame of the animated Julia-set fractal into `pixels`."""
    for i, j in pixels:  # taichi parallelizes this outermost loop
        c = ti.Vector([-0.8, 0.2 * ti.cos(t)])  # time-varying Julia constant
        z = ti.Vector([i / n - 1, j / n - 0.5]) * 2
        count = 0
        # Iterate z <- z^2 + c until escape (|z| >= 20) or 50 steps.
        while z.norm() < 20 and count < 50:
            z = complex_sqr(z) + c
            count += 1
        # Fewer iterations before escape -> brighter pixel.
        pixels[i, j] = 1 - count * 0.02
class Taichi_fractal(metaclass=Singleton):
    """Chameleon tool that streams the taichi fractal into an Unreal UI image."""
    def __init__(self, jsonPath:str):
        # jsonPath: path of the Chameleon UI json this tool instance is bound to.
        self.jsonPath = jsonPath
        self.data = unreal.PythonBPLib.get_chameleon_data(self.jsonPath)
        # Name of the image widget (in the UI json) that receives the pixels.
        self.ui_image = "taichi_fractal_image"
        # Persistent color buffer reused every tick: one LinearColor per pixel
        # of the (2n x n) canvas, initialized to white.
        self.colors = [unreal.LinearColor(1, 1, 1, 1) for _ in range(2 * n * n)]
        self.tick_count = 0
    def on_tick(self):
        """Render the next animation frame and push it to the UI image."""
        paint(self.tick_count * 0.03)
        x_np = pixels.to_numpy()
        width = 2 * n
        height = n
        # Copy the grayscale field into the color buffer; x indexes columns
        # (first field axis), y indexes rows, row-major into the flat list.
        for x, v in enumerate(x_np):
            for y, c in enumerate(v):
                index = y * width + x
                self.colors[index].r = self.colors[index].g = self.colors[index].b = float(c)
        self.data.set_image_pixels(self.ui_image, self.colors, width, height)
        self.tick_count += 1
    def on_button_click(self):
        # Restart the animation from t == 0.
        self.tick_count = 0
|
import os
import xml.etree.ElementTree as ET
# Root of the exported CryEngine project on disk.
CRY_ENGINE_OUTPUT_FOLDER_ROOT = "D:/project/"
LEVEL_ROOT_FOLDER = "data/levels" # Removed leading slash for consistency
LEVEL_LAYERS_FOLDER = "layers"
LEVEL_EDITOR_XML = "level.editor_xml"
PREFAB_ROOT_FOLDER = 'prefabs'
LEVEL_NAME = "rataje"
# Only top-level layers whose name appears here are imported.
LAYER_WHITELIST = [
    "Rataje",
]
class StaticMesh:
    """A static-mesh placement parsed from a CryEngine level/prefab XML node."""
    def __init__(self):
        self.name = None       # object name ("Name" attribute)
        self.pos = None        # "x,y,z" position string
        self.rotate = None     # "w,x,y,z" quaternion string
        self.scale = None      # "x,y,z" scale string
        self.mesh_path = None  # source geometry path (.cgf)
    def init_from_brush_xml(self, xml_node):
        """Populate from an <Object Type="Brush"> node (mesh path in "Prefab").

        Element.get() already returns a default when the attribute is missing,
        so the original "if key in attrib" dance is unnecessary.
        """
        self.name = xml_node.get("Name")
        self.pos = xml_node.get("Pos", "0.0,0.0,0.0")
        self.rotate = xml_node.get("Rotate", "0.0,0.0,0.0,0.0")
        self.scale = xml_node.get("Scale", "1.0,1.0,1.0")
        self.mesh_path = xml_node.get("Prefab")
    def init_from_geo_xml(self, xml_node):
        """Populate from an <Object Type="GeomEntity"> node (mesh path in "Geometry")."""
        self.name = xml_node.get("Name")
        self.pos = xml_node.get("Pos", "0.0,0.0,0.0")
        self.rotate = xml_node.get("Rotate", "0.0,0.0,0.0,0.0")
        self.scale = xml_node.get("Scale", "1.0,1.0,1.0")
        self.mesh_path = xml_node.get("Geometry")
class PrefabActor:
    """A placed prefab instance parsed from a CryEngine layer XML node."""
    def __init__(self):
        self.name = None         # instance name
        self.pos = None          # "x,y,z" position string
        self.rotate = None       # "w,x,y,z" quaternion string
        self.scale = None        # "x,y,z" scale string
        self.prefab_name = None  # "<library>.<prefab>" reference
    def init_from_brush_xml(self, xml_node):
        """Populate from an <Object Type="Prefab"> node.

        Missing transform attributes fall back to identity defaults via
        Element.get()'s default parameter (the original probed .attrib first,
        which is redundant).
        """
        self.name = xml_node.get("Name")
        self.pos = xml_node.get("Pos", "0.0,0.0,0.0")
        self.rotate = xml_node.get("Rotate", "0.0,0.0,0.0,0.0")
        self.scale = xml_node.get("Scale", "1.0,1.0,1.0")
        self.prefab_name = xml_node.get("PrefabName")
class Layer:
    """A CryEngine level layer: placed objects plus recursively loaded child layers."""
    def __init__(self, name):
        self.name = name
        self.prefab_actors = []
        self.static_meshes = []
        self.child_layers = []
    def init_from_xml(self, xml_node):
        """Populate this layer from a parsed .lyr XML root.

        Collects Prefab/GeomEntity/Brush objects, then recursively loads each
        <ChildLayers>/<Layer> entry from its own .lyr file on disk.
        NOTE(review): ".//LayerObjects" matches at any depth — assumed to find
        only this layer's own objects; confirm against the .lyr schema.
        """
        # self.name = xml_node.get("Name") if "Name" in xml_node.attrib else None
        self.prefab_actors = []
        self.static_meshes = []
        self.child_layers = []
        # Iterate through child objects
        objects_node = xml_node.find(".//LayerObjects")
        if objects_node is not None:
            for obj_node in objects_node.findall("Object"):
                obj_type = obj_node.get("Type")
                if obj_type == "Prefab":
                    prefab_actor = PrefabActor()
                    prefab_actor.init_from_brush_xml(obj_node)
                    self.prefab_actors.append(prefab_actor)
                elif obj_type == "GeomEntity":
                    static_mesh = StaticMesh()
                    static_mesh.init_from_geo_xml(obj_node)
                    self.static_meshes.append(static_mesh)
                elif obj_type == "Brush":
                    static_mesh = StaticMesh()
                    static_mesh.init_from_brush_xml(obj_node)
                    self.static_meshes.append(static_mesh)
        child_layers_node = xml_node.find(".//ChildLayers")
        layers_node = child_layers_node.findall("Layer") if child_layers_node is not None else None
        if layers_node is not None:
            for layer_node in layers_node:
                fullname = layer_node.get("FullName")
                name = layer_node.get("Name")
                # Child layers live in their own .lyr files next to the level.
                layer_xml_path = os.path.join(CRY_ENGINE_OUTPUT_FOLDER_ROOT, LEVEL_ROOT_FOLDER, LEVEL_NAME, LEVEL_LAYERS_FOLDER, f"{fullname}.lyr")
                if os.path.exists(layer_xml_path):
                    tree = ET.parse(layer_xml_path)
                    root = tree.getroot()
                    layer = Layer(name)
                    layer.init_from_xml(root)
                    self.child_layers.append(layer)
                else:
                    print(f"Layer file not found: {layer_xml_path}")
        print(f"Layer: {self.name}")
        print(f"Prefab Actors: {[actor.name for actor in self.prefab_actors]}")
    def get_mesh_paths(self):
        """Return the set of mesh paths used by this layer and all child layers."""
        mesh_paths = set()
        for static_mesh in self.static_meshes:
            if static_mesh.mesh_path:
                mesh_paths.add(static_mesh.mesh_path)
        for child_layer in self.child_layers:
            child_mesh_paths = child_layer.get_mesh_paths()
            mesh_paths.update(child_mesh_paths)
        return mesh_paths
    def get_prefab_paths(self):
        """Return the set of prefab names placed in this layer and all child layers."""
        prefab_actor_paths = set()
        for prefab_actor in self.prefab_actors:
            if prefab_actor.prefab_name:
                prefab_actor_paths.add(prefab_actor.prefab_name)
        for child_layer in self.child_layers:
            child_prefab_actor_paths = child_layer.get_prefab_paths()
            prefab_actor_paths.update(child_prefab_actor_paths)
        return prefab_actor_paths
class Prefab:
    """A CryEngine prefab definition: a named group of static meshes."""
    def __init__(self):
        self.name = None     # prefab name within its library
        self.library = None  # owning library name (None for level-local prefabs)
        self.static_meshes = []
    def init_from_xml(self, xml_node):
        """Populate from a <Prefab> node; collects GeomEntity/Brush child objects.

        Element.get() already returns None for missing attributes, replacing
        the original "if key in attrib" checks.
        """
        self.name = xml_node.get("Name")
        self.library = xml_node.get("Library")
        self.static_meshes = []
        # Iterate through child objects
        objects_node = xml_node.find("Objects")
        if objects_node is not None:
            for obj_node in objects_node.findall("Object"):
                obj_type = obj_node.get("Type")
                if obj_type == "GeomEntity":
                    static_mesh = StaticMesh()
                    static_mesh.init_from_geo_xml(obj_node)
                    self.static_meshes.append(static_mesh)
                elif obj_type == "Brush":
                    static_mesh = StaticMesh()
                    static_mesh.init_from_brush_xml(obj_node)
                    self.static_meshes.append(static_mesh)
    def get_prefab_name(self):
        """Return "<library>.<name>", or just the name for level-local prefabs."""
        return f"{self.library}.{self.name}" if self.library else self.name
    def get_mesh_paths(self):
        """Return the set of mesh paths referenced by this prefab."""
        mesh_paths = set()
        for static_mesh in self.static_meshes:
            if static_mesh.mesh_path:
                mesh_paths.add(static_mesh.mesh_path)
        return mesh_paths
class Level:
    """Parsed CryEngine level: prefab library plus top-level layers."""
    def __init__(self):
        self.name = None
        self.prefabs = {}  # full prefab name -> Prefab
        self.layers = []   # top-level Layer objects
    def get_all_mesh_paths(self):
        """Return every mesh path referenced by the layers or by prefabs they place.

        Bug fix: the original compared Prefab *objects* against a set of
        prefab *names* (``if prefab in all_prefab_paths``), so prefab meshes
        were never collected; it also rebuilt the prefab-path set per layer
        and had a duplicated ``set()`` initialization. Prefab names are now
        accumulated across all layers and matched by dictionary key.
        """
        all_mesh_paths = set()
        referenced_prefab_names = set()
        for layer in self.layers:
            all_mesh_paths.update(layer.get_mesh_paths())
            referenced_prefab_names.update(layer.get_prefab_paths())
        for prefab_name, prefab in self.prefabs.items():
            if prefab_name in referenced_prefab_names:
                all_mesh_paths.update(prefab.get_mesh_paths())
        return all_mesh_paths
def parse_prefabs_library(prefabs_library_node):
    """Collect prefabs from the inline <LevelLibrary> and all external library files.

    Returns a dict mapping full prefab name (library.name) -> Prefab.
    """
    prefabs = []
    libraries = []
    # Parse LevelLibrary for prefabs
    level_library_node = prefabs_library_node.find("LevelLibrary")
    if level_library_node is not None:
        for prefab_node in level_library_node.findall("Prefab"):
            prefab = Prefab()
            prefab.init_from_xml(prefab_node)
            prefabs.append(prefab)
    # Parse Library names
    for library_node in prefabs_library_node.findall("Library"):
        library_name = library_node.get("Name")
        if library_name:
            libraries.append(library_name)
    # External libraries live as <name>.xml under the prefab root folder.
    for library_name in libraries:
        library_path = os.path.join(CRY_ENGINE_OUTPUT_FOLDER_ROOT, PREFAB_ROOT_FOLDER, f"{library_name.lower()}.xml")
        if os.path.exists(library_path):
            tree = ET.parse(library_path)
            root = tree.getroot()
            for prefab_node in root.findall("Prefab"):
                prefab = Prefab()
                prefab.init_from_xml(prefab_node)
                prefabs.append(prefab)
    prefab_dict = {}
    for prefab in prefabs:
        prefab_dict[prefab.get_prefab_name()] = prefab
    return prefab_dict
def parse_level():
    """Parse the CryEngine level.editor_xml plus whitelisted layer files into a Level."""
    level = Level()
    level.name = LEVEL_NAME
    # Extract and parse the PrefabsLibrary contents
    file_path = os.path.join(CRY_ENGINE_OUTPUT_FOLDER_ROOT, LEVEL_ROOT_FOLDER, LEVEL_NAME, LEVEL_EDITOR_XML)
    if os.path.exists(file_path):
        tree = ET.parse(file_path)
        root = tree.getroot()
        prefabs_library_node = root.find("PrefabsLibrary")
        if prefabs_library_node is not None:
            level.prefabs = parse_prefabs_library(prefabs_library_node)
            # # Print all prefab names
            # for prefab in level.prefabs.keys():
            #     print(prefab)
        else:
            print("No <PrefabsLibrary> element found in the XML.")
        layers = []
        # Properly locate ChildLayers nodes under ObjectLayers -> RootLayer
        object_layers_node = root.find(".//ObjectLayers")
        if object_layers_node is not None:
            root_layer_nodes = object_layers_node.findall("RootLayer")
            # if root_layer_node is not None:
            #     child_layers_node = root_layer_node.find("ChildLayers")
            #     if child_layers_node is not None:
            for layer_node in root_layer_nodes:
                fullname = layer_node.get("FullName")
                name = layer_node.get("Name")
                if name not in LAYER_WHITELIST:
                    print(f"Layer {name} not in whitelist, skipping.")
                    continue
                layer_xml_path = os.path.join(CRY_ENGINE_OUTPUT_FOLDER_ROOT, LEVEL_ROOT_FOLDER, LEVEL_NAME, LEVEL_LAYERS_FOLDER, f"{fullname}.lyr")
                if os.path.exists(layer_xml_path):
                    # NOTE(review): `tree`/`root` are rebound here, shadowing the
                    # level document's root; harmless today since neither is read
                    # again after this loop.
                    tree = ET.parse(layer_xml_path)
                    root = tree.getroot()
                    layer = Layer(name)
                    layer.init_from_xml(root)
                    layers.append(layer)
                else:
                    print(f"Layer file not found: {layer_xml_path}")
            level.layers = layers
    else:
        print(f"File not found: {file_path}")
    return level
import unreal
# Editor libraries / subsystems used by the spawn functions below.
editor_level_lib = unreal.EditorLevelLibrary()
level_editor_sub = unreal.get_editor_subsystem(unreal.LevelEditorSubsystem)
editor_asset_sub = unreal.get_editor_subsystem(unreal.EditorAssetSubsystem)
data_layer_sub = unreal.get_editor_subsystem(unreal.DataLayerEditorSubsystem)
# Stand-in mesh used when the converted asset has not been imported yet.
PLACE_HOLDER_SM= "/project/"
place_holder_sm_obj = unreal.EditorAssetLibrary.load_asset(PLACE_HOLDER_SM)
# Package roots for generated prefab levels and imported source meshes.
PREFAB_PACKAGE_PATH = "/project/"
INPUT_PACKAGE_ROOT = "/project/"
import math
def quaternion_to_euler(quaternion):
    """
    Converts a quaternion (x, y, z, w) to Euler angles (pitch, yaw, roll) in degrees.
    """
    qx, qy, qz, qw = quaternion
    # Roll: rotation about the X axis.
    roll_rad = math.atan2(2 * (qw * qx + qy * qz), 1 - 2 * (qx * qx + qy * qy))
    # Pitch: rotation about the Y axis; clamp to +/-90 deg at the gimbal singularity.
    sin_pitch = 2 * (qw * qy - qz * qx)
    if abs(sin_pitch) >= 1:
        pitch_rad = math.copysign(math.pi / 2, sin_pitch)
    else:
        pitch_rad = math.asin(sin_pitch)
    # Yaw: rotation about the Z axis.
    yaw_rad = math.atan2(2 * (qw * qz + qx * qy), 1 - 2 * (qy * qy + qz * qz))
    # Convert radians to degrees before returning (pitch, yaw, roll).
    return math.degrees(pitch_rad), math.degrees(yaw_rad), math.degrees(roll_rad)
def convert_cryengine_to_unreal_rotation(cryengine_rotate):
    """
    Converts CryEngine quaternion rotation to Unreal Engine rotator.
    Assumes the input quaternion is in (w, x, y, z) order.
    For example, a CryEngine rotation "0.90630782,0.42261824,0,0"
    represents a 50ยฐ rotation about the X axis and should produce
    an Unreal rotator of (Pitch: 50.0, Yaw: 0.0, Roll: 0.0).
    """
    # Parse the "w,x,y,z" string and reorder to (x, y, z, w) for the converter.
    qw, qx, qy, qz = (float(component) for component in cryengine_rotate.split(","))
    computed_pitch, computed_yaw, computed_roll = quaternion_to_euler([qx, qy, qz, qw])
    # Remap to Unreal's (Pitch, Yaw, Roll): the CryEngine X-rotation (roll)
    # becomes Pitch, while the Y and Z rotations flip sign.
    return computed_roll, -1.0 * computed_pitch, -1.0 * computed_yaw
def recreate_level_in_unreal(level_data):
    """Spawn every layer of the parsed CryEngine level into the current UE level,
    with a cancellable progress dialog."""
    with unreal.ScopedSlowTask(len(level_data.layers), "Importing Layers...") as slow_task:
        # display the dialog
        slow_task.make_dialog(True)
        for layer in level_data.layers:
            if slow_task.should_cancel():
                break
            slow_task.enter_progress_frame(1, "Importing Layer {}".format(layer.name))
            recreate_layer_in_unreal(layer)
def recreate_layer_in_unreal(layer, parent_layer=None):
    """Recreate one CryEngine layer as a UE data layer and spawn its actors.

    parent_layer: parent DataLayerInstance for nested layers, or None for roots.
    """
    # Reuse an existing data layer with the same label, or create a private one.
    data_layer_instance = data_layer_sub.get_data_layer_from_label(layer.name)
    if not data_layer_instance:
        data_layer_create_param = unreal.DataLayerCreationParameters()
        data_layer_create_param.data_layer_asset = None
        data_layer_create_param.is_private = True
        data_layer_instance = data_layer_sub.create_data_layer_instance(data_layer_create_param)
        result = unreal.PythonFunctionLibrary().set_data_layer_short_name(data_layer_instance, layer.name)
    if parent_layer:
        data_layer_sub.set_parent_data_layer(data_layer_instance, parent_layer)
    spawned_actors = []
    # Iterate through prefab actors and static meshes
    for prefab_actor in layer.prefab_actors:
        prefab_actor = spawn_prefab_actor(prefab_actor)
        spawned_actors.append(prefab_actor)
    for static_mesh in layer.static_meshes:
        static_mesh_actor = spawn_static_mesh(static_mesh)
        spawned_actors.append(static_mesh_actor)
    result = data_layer_sub.add_actors_to_data_layer(spawned_actors, data_layer_instance)
    # NOTE(review): this rebinds the `layer` parameter while recursing; it works
    # because the iterable is evaluated once, but a distinct name would be clearer.
    for layer in layer.child_layers:
        recreate_layer_in_unreal(layer, data_layer_instance)
def spawn_actor_common(actor, actor_class):
    """Spawn *actor_class* at the parsed CryEngine transform of *actor*.

    Positions are scaled by 100 and Y is negated — presumably CryEngine
    meters -> UE centimeters plus the handedness flip; confirm against the
    source data convention.
    """
    pos_str = actor.pos
    pos = [float(coord)*100.0 for coord in pos_str.split(",")]
    rotate = convert_cryengine_to_unreal_rotation(actor.rotate)
    scale_str = actor.scale
    scale = [float(coord) for coord in scale_str.split(",")]
    mesh_actor = editor_level_lib.spawn_actor_from_class(actor_class, unreal.Vector(pos[0], -1.0*pos[1], pos[2]))
    mesh_actor.set_actor_rotation(unreal.Rotator(rotate[0], rotate[1], rotate[2]), False)
    mesh_actor.set_actor_scale3d(unreal.Vector(scale[0], scale[1], scale[2]))
    return mesh_actor
def spawn_static_mesh(static_mesh):
    """Spawn a StaticMeshActor for *static_mesh*; fall back to the placeholder
    mesh when the converted asset does not exist in the project yet."""
    mesh_actor = spawn_actor_common(static_mesh, unreal.StaticMeshActor)
    mesh_actor.set_actor_label(static_mesh.name)
    mesh_component = mesh_actor.get_component_by_class(unreal.StaticMeshComponent)
    # Map the CryEngine .cgf path onto the imported asset's package path.
    mesh_package_path = INPUT_PACKAGE_ROOT + '/' + static_mesh.mesh_path.replace('.cgf', '')
    if editor_asset_sub.does_asset_exist(mesh_package_path):
        static_mesh_obj = editor_asset_sub.load_asset(mesh_package_path)
        mesh_component.set_static_mesh(static_mesh_obj)
    else:
        mesh_component.set_static_mesh(place_holder_sm_obj)
    return mesh_actor
def spawn_prefab_actor(prefab_actor):
    """Spawn a LevelInstance actor referencing the level asset generated for
    the prefab (see create_level_prefab)."""
    # "library.prefab" maps onto "<PREFAB_PACKAGE_PATH>/library/prefab".
    prefab_path = prefab_actor.prefab_name.replace('.', '/')
    prefab_path = PREFAB_PACKAGE_PATH + "/" + prefab_path
    level_instance_actor = spawn_actor_common(prefab_actor, unreal.LevelInstance)
    world_asset = unreal.EditorAssetLibrary.load_asset(prefab_path)
    level_instance_actor.set_editor_property("world_asset", world_asset)
    level_instance_actor.set_actor_label(prefab_actor.name)
    return level_instance_actor
def generated_all_prefabs(level_data: Level):
    """Create a level asset for every prefab in *level_data*'s library."""
    for prefab_definition in level_data.prefabs.values():
        create_level_prefab(prefab_definition)
def create_level_prefab(prefab: Prefab):
    """Create and save a new level asset containing the prefab's static meshes."""
    print(prefab.get_prefab_name())
    # "library.prefab" -> "<PREFAB_PACKAGE_PATH>/library/prefab"
    new_level_path = prefab.get_prefab_name().replace('.', '/')
    new_level_path = PREFAB_PACKAGE_PATH + "/" + new_level_path
    print(new_level_path)
    level_editor_sub.new_level(new_level_path, False)
    for static_mesh in prefab.static_meshes:
        spawn_static_mesh(static_mesh)
    level_editor_sub.save_current_level()
if __name__ == "__main__":
    # Parse the CryEngine level description, then rebuild it in the open UE level.
    level_data = parse_level()
    # generated_all_prefabs(level_data)
    recreate_level_in_unreal(level_data)
    # all_mesh_paths = level_data.get_all_mesh_paths()
    # print(all_mesh_paths)
    # for mesh_path in all_mesh_paths:
    #     print(mesh_path)
|
# Copyright 2020 Tomoaki Yoshida<[email protected]>
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/project/.0/.
#
#
#
# You need following modules to run this script
# + pillow
# + numpy
# + gdal
#
# You can install these modules by running following commands on a posh prompt
# PS> cd /project/ Files\Epic Games\UE_4.25\Engine\Binaries\ThirdParty\Python\Win64
# PS> ./python.exe -m pip install pillow numpy
# GDAL cannot install by this simple method. You need to download whl file from
# https://www.lfd.uci.edu/~gohlke/pythonlibs/#gdal
# and then, install it by the similar command
# PS> ./python.exe -m pip install /project/-2.2.4-cp27-cp27m-win_amd64.whl
#
# You may want to add
# --target="/project/ Files\Epic Games\UE_4.25\Engine\Source\ThirdParty\Python\Win64\Lib\site-packages"
# to each pip install command. Without this --target option, modules will be installed in this folder.
# /project/ Files\Epic Games\UE_4.25\Engine\Binaries\ThirdParty\Python\Win64\Lib\site-packages
import unreal
import gdal
import osr
import os
from PIL import Image, ImageTransform, ImageMath
import numpy as np
import math
import sys
# Shorthand editor library aliases used throughout this script.
al=unreal.EditorAssetLibrary
el=unreal.EditorLevelLibrary
# Script directory and the project root two levels up from it.
sdir=os.path.dirname(os.path.abspath(__file__))
projdir=os.path.join(sdir,"..","..")
# Utilities
# FVector ([deg], [min], [sec]) -> float [deg]
def Decode60(vec):
    """Convert an FVector of (degrees, minutes, seconds) to decimal degrees."""
    minutes_total = (vec.z / 60.) + vec.y
    return minutes_total / 60. + vec.x
# float [deg] -> FVector ([deg], [min], [sec])
def Encode60(v):
    """Convert decimal degrees to an FVector of (degrees, minutes, seconds)."""
    whole_degrees = math.floor(v)
    whole_minutes = math.floor((v - whole_degrees) * 60.)
    seconds = (v - whole_degrees - whole_minutes / 60.) * 3600.
    return unreal.Vector(whole_degrees, whole_minutes, seconds)
class LandscapeParams:
    """Tunable parameters for landscape resampling; setters chain by returning self."""
    def __init__(self):
        self.setStepSize(10)
        self.setZScale(10)
        self.setZEnhance(1)
    def setStepSize(self, size):
        """Landscape cell size [cm]."""
        self.stepSize = size
        return self
    def setZScale(self, scale):
        """Landscape Z scale value: 100 -> +/-256 m, 10 -> +/-25.6 m, 1 -> +/-256 cm.

        Also derives toUEScale, the factor converting height [m] into UE
        16-bit heightmap units.
        """
        self.toUEScale = 100. * 128. / scale
        self.zScale = scale
        return self
    def setZEnhance(self, enh):
        """Optional extra Z scaling factor."""
        self.zEnhance = enh
        return self
class GeoTIFF:
    """GDAL GeoTIFF wrapper exposing pixel(UV) <-> geographic(BL) transforms."""
    def __init__(self, file, lp):
        self.stepSize=lp.stepSize
        self.gt = gdal.Open(file, gdal.GA_ReadOnly)
        #self.rast = np.array([self.gt.GetRasterBand(1).ReadAsArray()])
        # Raster band 1 (height values) wrapped as a PIL image.
        self.image = Image.fromarray(self.gt.GetRasterBand(1).ReadAsArray())
        self.src_cs = osr.SpatialReference()
        self.dst_cs = osr.SpatialReference()
        self.dst_cs.ImportFromWkt(self.gt.GetProjectionRef())
        self.setSrcEPSG(6668) # default to JGD2011
        # inverse transform from GeoTIFF(UV) to GeoTIFF(Logical)
        self.mat = self.gt.GetGeoTransform()
        d = 1./(self.mat[5]*self.mat[1]-self.mat[4]*self.mat[2])
        self.iaf = np.array([[ self.mat[5],-self.mat[2]],
                    [-self.mat[4], self.mat[1]]])*d
        self.offset = np.array([[self.mat[0]], [self.mat[3]]])
        self.af=np.array([[self.mat[1], self.mat[2]],
                [self.mat[4], self.mat[5]]])
    def setSrcEPSG(self, epsg):
        """Set the interface coordinate system and rebuild both transforms."""
        self.src_cs = osr.SpatialReference()
        self.src_cs.ImportFromEPSG(epsg)
        self.transS2G = osr.CoordinateTransformation(self.src_cs, self.dst_cs)
        # Geotiff CS to Interface CS
        self.transG2S=osr.CoordinateTransformation(self.dst_cs,self.src_cs)
    def getBL(self,uv):
        """Pixel (u, v) -> interface-CS (B, L) coordinate."""
        u=uv[0]
        v=uv[1]
        bl=np.dot(self.af,np.array([[u],[v]]))+self.offset
        sbl=self.transG2S.TransformPoint(bl[1][0],bl[0][0])
        return (sbl[0],sbl[1])
    def getBBoxBL(self):
        """Return the raster's corner coordinates in the interface CS."""
        # Geotiff CS to Interface CS
        return (self.getBL((0,0)),self.getBL((self.gt.RasterXSize,self.gt.RasterYSize)))
    def sanitizedBounds(self, bbox=None):
        """Normalize a ((b,l),(b,l)) bbox into an ordered (bmin,bmax,lmin,lmax)."""
        if bbox is None:
            bbox=self.getBBoxBL()
        tl,br=bbox
        bmin, bmax = tl[0], br[0]
        if bmin>bmax:
            bmin, bmax = bmax, bmin
        lmin, lmax = tl[1], br[1]
        if lmin>lmax:
            lmin, lmax = lmax, lmin
        return ((bmin,bmax,lmin,lmax))
    def getIntersection(self, bboxBL):
        """Intersect *bboxBL* with the raster bounds; None when disjoint."""
        bbox=self.sanitizedBounds(bboxBL)
        sbbox=self.sanitizedBounds()
        bmin=max(bbox[0],sbbox[0])
        bmax=min(bbox[1],sbbox[1])
        lmin=max(bbox[2],sbbox[2])
        lmax=min(bbox[3],sbbox[3])
        if lmax < lmin or bmax < bmin: # No intersection
            return None
        return ((bmax,lmin),(bmin,lmax)) # North-East, South-West
    def getUV(self, srcBL):
        """Interface-CS (B, L) -> fractional pixel (u, v)."""
        gtBL = self.transS2G.TransformPoint(srcBL[1], srcBL[0])
        bl=np.array([[gtBL[0]],[gtBL[1]]])
        uv = np.dot(self.iaf, bl-self.offset)
        return (uv[0][0], uv[1][0])
def getLandscapeBBox(lp):
    """Return (min_x, min_y, max_x, max_y, grid_size) covering all Landscape actors.

    lp: LandscapeParams supplying the grid step size [cm].
    Raises RuntimeError when the level contains no Landscape/LandscapeProxy
    actors — the original fell through to a NameError on the unbound
    accumulator variables in that case.
    """
    # search for landscape proxy actors
    actors = el.get_all_level_actors()
    found = False
    lx = ly = hx = hy = 0.0
    for a in actors:
        cls_name = a.get_class().get_name()
        # Match Landscape/LandscapeStreamingProxy etc., but not the gizmo actor.
        if cls_name.startswith("Landscape") and not cls_name.startswith("LandscapeGizmo"):
            #print("Landscape Found : "+ a.get_name())
            o, box = a.get_actor_bounds(True)
            h = o + box
            l = o - box
            if not found:
                lx, ly, hx, hy = l.x, l.y, h.x, h.y
                found = True
            else:
                lx = min(lx, l.x)
                ly = min(ly, l.y)
                hx = max(hx, h.x)
                hy = max(hy, h.y)
    if not found:
        raise RuntimeError("No Landscape actors found in the current level.")
    print("Landscape bounding box: ({0}, {1} - {2}, {3})".format(lx,ly,hx,hy))
    print("Landscape size: {0} x {1}".format(hx-lx,hy-ly))
    size=(int((hx-lx)/lp.stepSize+1),int((hy-ly)/lp.stepSize+1))
    print("Landscape grid size: {0}".format(size))
    return (lx,ly,hx,hy,size)
def getGeoReference():
    """Find the first GeoReferenceBP actor in the level and initialize its converter.

    NOTE(review): implicitly returns None when no such actor exists — callers
    (LayerGenerator.__init__) do not handle that case.
    """
    w=el.get_all_level_actors()
    theFirst=True
    for a in w:
        if(a.get_class().get_name().startswith("GeoReferenceBP")):
            print("GeoReference Found")
            ref=a
            ref.initialize_geo_conv()
            return ref
class LayerGenerator:
    """Resamples a GeoTIFF height map onto the current level's landscape grid."""
    # lp: LandscapeParams
    def __init__(self, lp):
        self.lp=lp
        # Landscape quad in BL coordinate
        lx,ly,hx,hy,size=getLandscapeBBox(lp)
        self.size=size
        self.ref=getGeoReference()
        # Landscape quad corners converted to decimal-degree BL coordinates.
        self.tl=tuple(map(Decode60, self.ref.get_bl(lx,ly)))
        self.bl=tuple(map(Decode60, self.ref.get_bl(lx,hy)))
        self.br=tuple(map(Decode60, self.ref.get_bl(hx,hy)))
        self.tr=tuple(map(Decode60, self.ref.get_bl(hx,ly)))
        print("Reference Quad=tl:{0} bl:{1} br:{2} tr:{3}".format(self.tl, self.bl, self.br, self.tr))
        # World-space origin of the geo reference and its BL coordinate.
        self.zo=self.ref.get_actor_location()
        self.zobl=tuple(map(Decode60, self.ref.get_bl(self.zo.x,self.zo.y)))
        print("GeoReference in BL {0} {1}".format(self.zobl[0], self.zobl[1]))
        print("GeoReference in UE {0}".format(self.zo))
    # Clip lowest height [m] (None for no clipping)
    def setZClipping(self, zClipping):
        # NOTE(review): resample() reads self.zClipping unconditionally, so this
        # must be called before resample() — confirm intended call order.
        self.zClipping=zClipping
    def resample(self, sourceFile, slow_task):
        """Resample *sourceFile* onto the landscape grid.

        Returns (height array clipped to the uint16 range, resampled image size).
        Advances *slow_task* by three progress frames.
        """
        # Landscape quad on geotiff image in UV coordinate
        gt=GeoTIFF(sourceFile,self.lp)
        tluv=gt.getUV(self.tl)
        bluv=gt.getUV(self.bl)
        bruv=gt.getUV(self.br)
        truv=gt.getUV(self.tr)
        zouv=gt.getUV(self.zobl)
        uvquad=tluv+bluv+bruv+truv
        if self.zClipping is not None:
            # Clamp every height below the clip level up to it.
            imageref=Image.new(gt.image.mode,gt.image.size,self.zClipping)
            clippedimg=ImageMath.eval("max(a,b)",a=gt.image,b=imageref)
            #clippedimg.save(os.path.join(projdir,"Assets","clipped.tif"))
        else:
            clippedimg=gt.image
        # resample geotiff image
        slow_task.enter_progress_frame(1,"Transforming image region")
        img=clippedimg.transform(self.size,Image.QUAD,data=uvquad,resample=Image.BICUBIC)#, fillcolor=sys.float.min)
        slow_task.enter_progress_frame(1,"Transforming height values")
        zov=gt.image.getpixel(zouv)
        zos=32768-(zov*self.lp.zEnhance-self.zo.z/100.)*self.lp.toUEScale # 32768: mid point (height=0)
        # convert height value [m] to unreal height value
        slow_task.enter_progress_frame(1,"Converting to 16bit grayscale")
        iarrf=np.array(img.getdata())*self.lp.toUEScale*self.lp.zEnhance + zos
        return iarrf.clip(0,65535), img.size
|
#Author Josh Whiteside 18/project/
import unreal
def get_all_level_boundaries():
    """Return every LevelStreamingVolume actor in the current editor world."""
    r = unreal.GameplayStatics.get_all_actors_of_class(unreal.EditorLevelLibrary.get_editor_world(),unreal.LevelStreamingVolume)
    return r
def get_closest_level_boundary(a, boundaries):
    """Return the boundary volume in *boundaries* whose origin is closest to actor *a*.

    Fixes two defects in the original: the sentinel was misleadingly named
    ``largestLength`` (it tracks the smallest distance) and used an arbitrary
    magic number, and the result variable was unbound (NameError) when
    *boundaries* was empty — this version returns None in that case.
    """
    closest = None
    closest_distance = float("inf")
    for boundary in boundaries:
        offset = boundary.get_actor_transform().translation.subtract(a.get_actor_transform().translation)
        distance = offset.length()
        if distance < closest_distance:
            closest = boundary
            closest_distance = distance
    return closest
def main():
    """Move each selected actor into the streaming level of its closest
    LevelStreamingVolume, with a cancellable progress dialog."""
    selected_actors = unreal.EditorLevelLibrary.get_selected_level_actors()
    level_boundaries = get_all_level_boundaries()
    total_frames = len(selected_actors)
    text_label = "Sorting objects"
    with unreal.ScopedSlowTask(total_frames, text_label) as slow_task:
        slow_task.make_dialog(True)
        for x in selected_actors:
            if slow_task.should_cancel():
                break
            b = get_closest_level_boundary(x, level_boundaries)
            #Calls c++ function that will find the level streaming of the respective name
            ls = unreal.LevelBoundarySortUtilities.get_level_streaming_from_name(b.streaming_level_names[0],x)
            a = [x] #Puts the current actor in an array for move actors to level (EditorLevelUtils wants it in an array)
            #moves actors to level
            unreal.EditorLevelUtils.move_actors_to_level(a,ls,True)
            slow_task.enter_progress_frame(1)
main()
|
# -*- coding: utf-8 -*-
import logging
import unreal
import inspect
import types
import Utilities
from collections import Counter
class attr_detail(object):
    """Inspection record for a single attribute of an object.

    Classifies the attribute (builtin callable / other callable / editor
    property / plain property), extracts a doc-derived signature, and — for
    callables that are safe to invoke with no arguments — captures the result
    of actually calling them.
    """
    def __init__(self, obj, name:str):
        self.name = name
        attr = None
        self.bCallable = None
        self.bCallable_builtin = None
        try:
            if hasattr(obj, name):
                attr = getattr(obj, name)
                self.bCallable = callable(attr)
                self.bCallable_builtin = inspect.isbuiltin(attr)
        except Exception as e:
            unreal.log(str(e))
        self.bProperty = not self.bCallable
        self.result = None
        self.param_str = None
        self.bEditorProperty = None
        self.return_type_str = None
        self.doc_str = None
        self.property_rw = None
        if self.bCallable:
            self.return_type_str = ""
            if self.bCallable_builtin:
                if hasattr(attr, '__doc__'):
                    docForDisplay, paramStr = _simplifyDoc(attr.__doc__)
                    try:
                        # getargspec was removed in Python 3.11; getfullargspec
                        # exposes the same .args attribute (matches the newer
                        # revision of this module).
                        sig = inspect.getfullargspec(getattr(obj, self.name))
                        args = sig.args
                        argCount = len(args)
                        if "self" in args:
                            argCount -= 1
                    except TypeError:
                        argCount = -1  # signature not introspectable (C-implemented)
                    if "-> " in docForDisplay:
                        self.return_type_str = docForDisplay[docForDisplay.find(')') + 1:]
                    else:
                        self.doc_str = docForDisplay[docForDisplay.find(')') + 1:]
                    # A doc signature of "(self)" counts as parameterless too.
                    if argCount == 0 or (argCount == -1 and (paramStr == '' or paramStr == 'self')):
                        # Method with no params: call it, unless the doc says it
                        # returns None (nothing interesting to show).
                        if '-> None' not in docForDisplay or self.name in ["__reduce__", "_post_init"]:
                            try:
                                if name == "get_actor_time_dilation" and isinstance(obj, unreal.Object):
                                    # call get_actor_time_dilation will crash engine if actor is get from CDO and has no world.
                                    if obj.get_world():
                                        self.result = attr.__call__()
                                    else:
                                        self.result = "skip call, world == None."
                                else:
                                    self.result = attr.__call__()
                            except:
                                # Best-effort: any failure just marks the call skipped.
                                self.result = "skip call.."
                        else:
                            print(f"docForDisplay: {docForDisplay}, self.name: {self.name}")
                            self.result = "skip call."
                    else:
                        self.param_str = paramStr
                        self.result = ""
                else:
                    logging.error("Can't find p")
            elif self.bCallable_other:
                if hasattr(attr, '__doc__'):
                    if isinstance(attr.__doc__, str):
                        docForDisplay, paramStr = _simplifyDoc(attr.__doc__)
                        if name in ["__str__", "__hash__", "__repr__", "__len__"]:
                            try:
                                self.result = "{}".format(attr.__call__())
                            except:
                                self.result = "skip call."
        else:
            # Plain property: reuse the value fetched above. The original re-ran
            # getattr() here, which raised AttributeError for names that failed
            # the hasattr() probe.
            self.result = attr
    def post(self, obj):
        """Late fill-in: fetch the raw value for plain properties with no result yet."""
        if self.bOtherProperty and not self.result:
            try:
                self.result = getattr(obj, self.name)
            except:
                self.result = "skip call..."
    def apply_editor_property(self, obj, type_, rws, descript):
        """Mark this attribute as an unreal editor property and fetch its value."""
        self.bEditorProperty = True
        self.property_rw = "[{}]".format(rws)
        try:
            # Direct call instead of the original eval(): same behavior, no
            # string building, robust for unusual property names.
            self.result = obj.get_editor_property(self.name)
        except:
            self.result = "Invalid"
    def __str__(self):
        s = f"Attr: {self.name} paramStr: {self.param_str} desc: {self.return_type_str} result: {self.result}"
        if self.bProperty:
            s += ", Property"
        if self.bEditorProperty:
            s += ", Editor Property"  # typo fix: was "Eidtor Property"
        if self.bOtherProperty:
            s += ", Other Property "
        if self.bCallable:
            s += ", Callable"
        if self.bCallable_builtin:
            s += ", Callable_builtin"
        if self.bCallable_other:
            s += ", bCallable_other"
        if self.bHasParamFunction:
            s += ", bHasParamFunction"
        return s
    def check(self):
        """Sanity check: an attribute should fall into at most one classification."""
        counter = Counter([self.bOtherProperty, self.bEditorProperty, self.bCallable_other, self.bCallable_builtin])
        if counter[True] == 2:
            unreal.log_error(f"{self.name}: {self.bEditorProperty}, {self.bOtherProperty} {self.bCallable_builtin} {self.bCallable_other}")
    @property
    def bOtherProperty(self):
        # A property not exposed as an unreal editor property.
        if self.bProperty and not self.bEditorProperty:
            return True
        return False
    @property
    def bCallable_other(self):
        # Callable but not a C-implemented builtin (e.g. __hash__, __eq__).
        if self.bCallable and not self.bCallable_builtin:
            return True
        return False
    @property
    def display_name(self, bRichText=True):
        """Indented display string for the attribute list UI."""
        if self.bProperty:
            return f"\t{self.name}"
        else:
            # callable
            if self.param_str:
                return f"\t{self.name}({self.param_str}) {self.return_type_str}"
            else:
                if self.bCallable_other:
                    return f"\t{self.name}"  # __hash__, __class__, __eq__ etc.
                else:
                    return f"\t{self.name}() {self.return_type_str}"
    @property
    def display_result(self) -> str:
        """Result string; editor properties are suffixed with their [rw] flags."""
        if self.bEditorProperty:
            return "{} {}".format(self.result, self.property_rw)
        else:
            return "{}".format(self.result)
    @property
    def bHasParamFunction(self):
        return self.param_str and len(self.param_str) != 0
def ll(obj):
    """Inspect *obj* and return a list of attr_detail records (None for falsy/modules).

    Builds one record per dir() entry, then merges in editor-property info
    parsed from the class doc string when *obj* is an unreal.Object.
    """
    if not obj:
        return None
    if inspect.ismodule(obj):
        return None
    result = []
    for x in dir(obj):
        attr = attr_detail(obj, x)
        result.append(attr)
    if hasattr(obj, '__doc__') and isinstance(obj, unreal.Object):
        editorPropertiesInfos = _getEditorProperties(obj.__doc__, obj)
        for name, type_, rws, descript in editorPropertiesInfos:
            # print(f"~~ {name} {type} {rws}, {descript}")
            # Find the existing record for this property, or create one.
            index = -1
            for i, v in enumerate(result):
                if v.name == name:
                    index = i
                    break
            if index != -1:
                this_attr = result[index]
            else:
                this_attr = attr_detail(obj, name)
                result.append(this_attr)
                # unreal.log_warning(f"Can't find editor property: {name}")
            this_attr.apply_editor_property(obj, type_, rws, descript)
    for i, attr in enumerate(result):
        attr.post(obj)
    return result
def _simplifyDoc(content):
def next_balanced(content, s="(", e = ")" ):
s_pos = -1
e_pos = -1
balance = 0
for index, c in enumerate(content):
match = c == s or c == e
if not match:
continue
balance += 1 if c == s else -1
if c == s and balance == 1 and s_pos == -1:
s_pos = index
if c == e and balance == 0 and s_pos != -1 and e_pos == -1:
e_pos = index
return s_pos, e_pos
return -1, -1
# bracketS, bracketE = content.find('('), content.find(')')
if not content:
return "", ""
bracketS, bracketE = next_balanced(content, s='(', e = ')')
arrow = content.find('->')
funcDocPos = len(content)
endSign = ['--', '\n', '\r']
for s in endSign:
p = content.find(s)
if p != -1 and p < funcDocPos:
funcDocPos = p
funcDoc = content[:funcDocPos]
if bracketS != -1 and bracketE != -1:
param = content[bracketS + 1: bracketE].strip()
else:
param = ""
return funcDoc, param
def _getEditorProperties(content, obj):
# print("Content: {}".format(content))
lines = content.split('\r')
signFound = False
allInfoFound = False
result = []
for line in lines:
if not signFound and '**Editor Properties:**' in line:
signFound = True
if signFound:
#todo re
# nameS, nameE = line.find('``') + 2, line.find('`` ')
nameS, nameE = line.find('- ``') + 4, line.find('`` ')
if nameS == -1 or nameE == -1:
continue
typeS, typeE = line.find('(') + 1, line.find(')')
if typeS == -1 or typeE == -1:
continue
rwS, rwE = line.find('[') + 1, line.find(']')
if rwS == -1 or rwE == -1:
continue
name = line[nameS: nameE]
type_str = line[typeS: typeE]
rws = line[rwS: rwE]
descript = line[rwE + 2:]
allInfoFound = True
result.append((name, type_str, rws, descript))
# print(name, type, rws)
if signFound:
if not allInfoFound:
unreal.log_warning("not all info found {}".format(obj))
else:
unreal.log_warning("can't find editor properties in {}".format(obj))
return result
def log_classes(obj):
    """Print *obj*'s Python type, UClass, generated BP class and class-hierarchy package."""
    print(obj)
    print("\ttype: {}".format(type(obj)))
    print("\tget_class: {}".format(obj.get_class()))
    # Prefer the actor's own generated class; fall back to resolving it via PythonBPLib.
    if type(obj.get_class()) is unreal.BlueprintGeneratedClass:
        generatedClass = obj.get_class()
    else:
        generatedClass = unreal.PythonBPLib.get_blueprint_generated_class(obj)
    print("\tgeneratedClass: {}".format(generatedClass))
    print("\tbp_class_hierarchy_package: {}".format(unreal.PythonBPLib.get_bp_class_hierarchy_package(generatedClass)))
def is_selected_asset_type(types):
    """Return True when any currently selected asset's exact type is in *types*
    (no subclass matching)."""
    return any(type(asset) in types for asset in Utilities.Utils.get_selected_assets())
|
import unreal
# One entry per selected asset; each entry is a list of [tag_name, value] pairs.
# Bug fix: the original initialized output_list as [[]] and assigned
# output_list[asset_index] inside the *tag* loop, which both dropped every tag
# but the last and raised IndexError as soon as a second asset had metadata.
output_list: list[list[list[str]]] = []
selected_assets: list[unreal.Object] = unreal.EditorUtilityLibrary.get_selected_assets()
for asset in selected_assets:
    # Load by package path (strip the ".ObjectName" suffix from the full path).
    loaded_asset = unreal.EditorAssetLibrary.load_asset(asset.get_path_name().split('.')[0])
    all_metadata = unreal.EditorAssetLibrary.get_metadata_tag_values(loaded_asset)
    asset_tags = []
    for tag_name, value in all_metadata.items():
        asset_tags.append([str(tag_name), value])
        unreal.log("Value of tag " + str(tag_name) + " for asset " + ": " + value)
    output_list.append(asset_tags)
print(output_list)
|
import unreal

# Source .fbx files and destination folder for the imported Datasmith scenes.
file_a = "/project/.fbx"
file_b = "/project/.fbx"
imported_scenes_path = "/project/"

# BUGFIX: this script used Python 2 `print` statements throughout, which are
# syntax errors in the Python 3 interpreter embedded in modern Unreal Engine;
# all prints are converted to print() calls and the (lost) indentation of the
# conditional blocks is restored. Behavior is otherwise unchanged.
print('Preparing import options...')
advanced_mesh_options = unreal.DatasmithStaticMeshImportOptions()
advanced_mesh_options.set_editor_property('max_lightmap_resolution', unreal.DatasmithImportLightmapMax.LIGHTMAP_512)
advanced_mesh_options.set_editor_property('min_lightmap_resolution', unreal.DatasmithImportLightmapMin.LIGHTMAP_64)
advanced_mesh_options.set_editor_property('generate_lightmap_u_vs', True)
advanced_mesh_options.set_editor_property('remove_degenerates', True)
base_options = unreal.DatasmithImportBaseOptions()
base_options.set_editor_property('include_geometry', True)
base_options.set_editor_property('include_material', True)
base_options.set_editor_property('include_light', True)
base_options.set_editor_property('include_camera', True)
base_options.set_editor_property('include_animation', True)
base_options.set_editor_property('static_mesh_options', advanced_mesh_options)
base_options.set_editor_property('scene_handling', unreal.DatasmithImportScene.CURRENT_LEVEL)
base_options.set_editor_property('asset_options', [])  # Not used
vred_options = unreal.DatasmithVREDImportOptions()
vred_options.set_editor_property('merge_nodes', False)
vred_options.set_editor_property('optimize_duplicated_nodes', False)
vred_options.set_editor_property('import_var', True)
vred_options.set_editor_property('var_path', "")
vred_options.set_editor_property('import_light_info', True)
vred_options.set_editor_property('light_info_path', "")
vred_options.set_editor_property('import_clip_info', True)
vred_options.set_editor_property('clip_info_path', "")
vred_options.set_editor_property('textures_dir', "")
vred_options.set_editor_property('import_animations', True)
vred_options.set_editor_property('intermediate_serialization', unreal.DatasmithVREDIntermediateSerializationType.DISABLED)
vred_options.set_editor_property('colorize_materials', False)
vred_options.set_editor_property('generate_lightmap_u_vs', False)
# (a duplicate 'import_animations' assignment was removed here)
# Direct import to scene and assets:
print('Importing directly to scene...')
unreal.VREDLibrary.import_(file_a, imported_scenes_path, base_options, None, True)
# 2-stage import step 1:
print('Parsing to scene object...')
scene = unreal.DatasmithVREDSceneElement.construct_datasmith_scene_from_file(file_b, imported_scenes_path, base_options, vred_options)
print('Resulting datasmith scene: ' + str(scene))
print('\tProduct name: ' + str(scene.get_product_name()))
print('\tMesh actor count: ' + str(len(scene.get_all_mesh_actors())))
print('\tLight actor count: ' + str(len(scene.get_all_light_actors())))
print('\tCamera actor count: ' + str(len(scene.get_all_camera_actors())))
print('\tCustom actor count: ' + str(len(scene.get_all_custom_actors())))
print('\tMaterial count: ' + str(len(scene.get_all_materials())))
print('\tAnimNode count: ' + str(len(scene.get_all_anim_nodes())))
print('\tAnimClip count: ' + str(len(scene.get_all_anim_clips())))
print('\tExtra light info count: ' + str(len(scene.get_all_extra_lights_info())))
print('\tVariant count: ' + str(len(scene.get_all_variants())))
# Modify one of the AnimNodes
# Warning: The AnimNode nested structure is all USTRUCTs, which are value types, and the Array accessor returns
# a copy. Meaning something like anim_nodes[0].name = 'new_name' will set the name on the COPY of anim_nodes[0]
anim_nodes = scene.get_all_anim_nodes()
if len(anim_nodes) > 0:
    node_0 = anim_nodes[0]
    old_name = node_0.name
    print('Anim node old name: ' + old_name)
    node_0.name += '_MODIFIED'
    modified_name = node_0.name
    print('Anim node modified name: ' + modified_name)
    anim_nodes[0] = node_0
    scene.set_all_anim_nodes(anim_nodes)
    # Check modification
    new_anim_nodes = scene.get_all_anim_nodes()
    print('Anim node retrieved modified name: ' + new_anim_nodes[0].name)
    assert new_anim_nodes[0].name == modified_name, "Node modification didn't work!"
    # Restore to previous state
    node_0 = new_anim_nodes[0]
    node_0.name = old_name
    new_anim_nodes[0] = node_0
    scene.set_all_anim_nodes(new_anim_nodes)
# 2-stage import step 2:
print('Importing assets and actors...')
result = scene.import_scene()
print('Import results: ')
print('\tImported actor count: ' + str(len(result.imported_actors)))
print('\tImported mesh count: ' + str(len(result.imported_meshes)))
print('\tImported level sequences: ' + str([a.get_name() for a in result.animations]))
print('\tImported level variant sets asset: ' + str(result.level_variant_sets.get_name()))
if result.import_succeed:
    print('Import succeeded!')
else:
    print('Import failed!')
|
from domain.material_service import MaterialService
from infrastructure.configuration.path_manager import PathManager
from infrastructure.configuration.message import LogMaterialsMissingPhysMatsMessage, LogMaterialsUsingTranslucencyMessage, OptimiseScriptLogMaterialsUsingTwoSidedMessage
from infrastructure.data.base_repository import BaseRepository
from infrastructure.logging.base_logging import BaseLogging
from infrastructure.utils.unreal_extensions import UnrealSystemLibrary
import unreal
class MaterialHandler:
    """Runs material audit checks over every repository asset and logs a summary.

    The three public log_* methods previously duplicated the same
    slow-task/collect loop; it is now factored into _collect_messages().
    """

    def __init__(self, _materialService: MaterialService, _systemLibrary: UnrealSystemLibrary, _repository: BaseRepository, _logging: BaseLogging):
        self.materialService = _materialService
        self.systemLibrary = _systemLibrary
        self.repository = _repository
        self.logging = _logging

    def _collect_messages(self, check):
        """Run *check* over all repository assets under a cancellable
        ScopedSlowTask dialog; return the list of non-empty result messages."""
        workingPath = PathManager.get_source_dir()
        allAssets = self.repository.list()
        logStringsArray = []
        with unreal.ScopedSlowTask(len(allAssets), workingPath) as ST:
            ST.make_dialog(True)
            for asset in allAssets:
                result = check(asset)
                if result.hasMessage():
                    logStringsArray.append(result.message)
                if ST.should_cancel():
                    break
                ST.enter_progress_frame(1, asset)
        return logStringsArray

    def log_materials_missing_phys_mats(self):
        """Log every material that has no physical material assigned."""
        messages = self._collect_messages(self.materialService.is_material_missing_phys_mat)
        self.logging.log(LogMaterialsMissingPhysMatsMessage().build_log_summary(messages))

    def log_materials_using_translucency(self):
        """Log every material using a translucent blend mode.

        NOTE: material instances do not expose blend mode here, so only base
        materials are checked (see the MIC handling that was commented out in
        the original implementation).
        """
        messages = self._collect_messages(self.materialService.is_materials_using_translucency)
        self.logging.log(LogMaterialsUsingTranslucencyMessage().build_log_summary(messages))

    def log_materials_using_two_sided(self):
        """Log every material flagged as two-sided."""
        messages = self._collect_messages(self.materialService.is_two_sided_material)
        self.logging.log(OptimiseScriptLogMaterialsUsingTwoSidedMessage().build_log_summary(messages))
|
import numpy as np
import random
import unreal
import time
import os
class Node:
    """One maze cell holding up to two wall rotations plus traversal flags."""

    def __init__(self, x, y, rotation1, rotation2=None):
        self.x = x
        self.y = y
        # Yaw (degrees) of the first wall piece; the second wall is optional.
        self.rotatew1 = rotation1
        self.rotatew2 = rotation2
        self.visited = False            # set by DFS during traversal
        self.deleteWall1Bottom = False  # carve away the bottom wall
        self.deleteWall2Right = False   # carve away the right wall
        self.isSideWall = False         # border cell drawn with a single wall
        self.isDummy = False            # placeholder cell that is never drawn

    def __str__(self):
        # Idiom fix: compare against None with "is not", not "!=".
        wall2_info = f", RC({self.x},{self.y},{self.rotatew2})" if self.rotatew2 is not None else ""
        return f"RC({self.x},{self.y}, {self.rotatew1}){wall2_info}"
def parse_randomness(maze, solution_path):
    """Randomly carve extra openings in cells that are neither on the solution
    path nor border cells, making the maze less predictable.

    For each in-bounds neighbour of every cell a die (0..2) is rolled: 0 opens
    the right wall, 1 opens both walls (only when both are still closed), and
    2 leaves the cell untouched.
    """
    for i in range(len(maze)):
        for j in range(len(maze[i])):
            for di, dj in ((0, 1), (1, 0), (0, -1), (-1, 0)):
                r, c = i + di, j + dj
                # The upper bounds deliberately exclude the last row/column.
                if not (0 <= r < len(maze) - 1 and 0 <= c < len(maze[0]) - 1):
                    continue
                node = maze[r][c]
                if node in solution_path or node.isSideWall:
                    continue
                roll = random.randint(0, 2)
                if roll == 0:
                    if not node.deleteWall2Right:
                        node.deleteWall2Right = True
                elif roll == 1:
                    if not node.deleteWall2Right and not node.deleteWall1Bottom:
                        node.deleteWall2Right = True
                        node.deleteWall1Bottom = True
def generate_maze(row, col):
    """Build a row x col grid of Node cells and print each one.

    Border cells (first row / first column) get a single side wall; all other
    cells get two walls (bottom + right). The bottom-left corner cell is a
    dummy that is never drawn.

    BUGFIX: the original allocated the grid as ``range(row)`` columns inside
    ``range(col)`` rows (dimensions swapped), which raised IndexError for any
    non-square maze. The print loop also shadowed the ``row`` parameter.
    """
    print("GEN MAZE")
    maze = [[None for _ in range(col)] for _ in range(row)]
    for i in range(row):
        for j in range(col):
            if j == 0:  # first column: left border wall
                if i == row - 1:
                    # bottom-left corner: dummy side wall rotated 90 degrees
                    maze[i][j] = Node(i, j, 90)
                    maze[i][j].isSideWall = True
                    maze[i][j].isDummy = True
                else:
                    maze[i][j] = Node(i, j, 0)
                    maze[i][j].isSideWall = True
            elif i == 0:  # first row: top border wall
                maze[i][j] = Node(i, j, -90)
                maze[i][j].isSideWall = True
            else:
                # interior cell: bottom wall (180) plus right wall (-90)
                two_walls = Node(i, j, 180)
                two_walls.rotatew2 = -90
                maze[i][j] = two_walls
    for maze_row in maze:
        for node in maze_row:
            print(node)
    return maze
def DFS(maze, start, end):  # Make sure to pass in a Node Object from Maze[i][j]
    """Depth-first traversal of the maze grid from *start* to *end*.

    Marks cells visited and records the visit order. Returns ``(maze,
    visit_order)`` when *end* is reached, otherwise ``(maze, [])``.
    """
    stack = [start]
    solution_path = []
    print("Start DFS")
    while stack:
        current = stack.pop()
        if current.visited:
            continue
        current.visited = True
        solution_path.append(current)
        if current == end:
            break
        r0, c0 = current.x, current.y
        # Up, left, down, right -- pushed in this order so "right" is
        # explored first off the LIFO stack, matching the original.
        for r, c in ((r0 - 1, c0), (r0, c0 - 1), (r0 + 1, c0), (r0, c0 + 1)):
            if not (0 <= r < len(maze) and 0 <= c < len(maze[0])):
                continue
            neighbor = maze[r][c]
            if neighbor is None:
                continue
            if not neighbor.visited:
                stack.append(neighbor)
    if not solution_path or solution_path[-1] != end:
        print("No Solution")
        return maze, []
    print("--------------SOLUTION PATH------------------")
    for step in solution_path:
        print(step)
    return maze, solution_path
def parse_solution(solution_path, maze):
    """Carve wall openings along the DFS solution path.

    For each consecutive pair of path cells, removes the wall between them
    based on the direction of travel; a side-wall cell additionally opens
    both walls of the cell two steps ahead.

    BUGFIX: the original set ``deletWall1Bottom`` (typo) on the look-ahead
    cell, silently creating a new attribute instead of opening the bottom
    wall. A bounds guard is also added so a side wall near the end of the
    path no longer raises IndexError.
    """
    for p in range(len(solution_path) - 1):
        for i in range(len(maze)):
            for j in range(len(maze[i])):
                if maze[i][j] != solution_path[p]:
                    continue
                current = maze[i][j]
                next_cell = solution_path[p + 1]
                x, y = current.x, current.y
                nx, ny = next_cell.x, next_cell.y
                if current.isSideWall and p + 2 < len(solution_path):
                    # Leaving a border cell: fully open the cell two steps ahead.
                    third_cell = solution_path[p + 2]
                    third_cell.deleteWall2Right = True
                    third_cell.deleteWall1Bottom = True  # was: deletWall1Bottom (typo)
                if (nx, ny) == (x - 1, y):  # go right
                    next_cell.deleteWall2Right = True
                if (nx, ny) == (x + 1, y):  # go left
                    current.deleteWall2Right = True
                if (nx, ny) == (x, y - 1):  # go down
                    next_cell.deleteWall1Bottom = True
                if (nx, ny) == (x, y + 1):  # go up
                    current.deleteWall1Bottom = True
def draw_maze(maze_info, row, col, start, end):
    """Spawn wall actors, the player start and the goal flag for the maze.

    Walls are randomly picked between two meshes; side-wall cells get a
    single wall, interior cells get up to two depending on their delete
    flags.

    BUGFIX: several branches below indexed the module-level global ``maze``
    instead of the ``maze_info`` parameter, so the function only worked when
    called with that exact global.
    """
    # these where some assets that I made and used but for simplicity I editted them out.
    """ regular_wall = "/project/.Stonewall"
    secondary_wall = "/project/.Glasswall"
    flag_asset = "/project/.SM_Flag" """
    regular_wall = "/project/.Wall_400x400"
    secondary_wall = "/project/.Wall_400x400"
    flag_asset = "/project/.StaticMesh'/project/.SM_PillarFrame'"
    world = unreal.EditorLevelLibrary.get_editor_world()
    # Player start at the maze entry (player-scale cells are 200uu apart).
    player_start_actor = unreal.EditorLevelLibrary.spawn_actor_from_class(
        unreal.PlayerStart, unreal.Vector(start.x * 200, start.y * 200, 0), unreal.Rotator(0, 0, 0))
    cell_size = 400.0
    wall_asset1 = unreal.EditorAssetLibrary.load_asset(regular_wall)
    wall_asset2 = unreal.EditorAssetLibrary.load_asset(secondary_wall)
    for i in range(row):
        for j in range(col):
            if i == row - 1 and j == 0:
                continue  # dummy bottom-left corner cell is never drawn
            # Randomly pick one of the two wall meshes.
            wall_asset = wall_asset1 if random.randint(0, 30) > 15 else wall_asset2
            cell = maze_info[i][j]
            position = unreal.Vector(cell.x * cell_size, cell.y * cell_size, 0)
            if cell == end:
                # Mark the goal cell with a scaled-up flag mesh.
                flag_mesh = unreal.EditorAssetLibrary.load_asset(flag_asset)
                flag_position = unreal.Vector(cell.x * cell_size - 200, cell.y * cell_size - 200, 0)
                flag_actor = unreal.EditorLevelLibrary.spawn_actor_from_object(flag_mesh, flag_position, unreal.Rotator(0, 0, 0))
                flag_actor.set_actor_scale3d(unreal.Vector(10.0, 10.0, 10.0))
            if cell.isDummy:
                continue
            if cell.isSideWall:
                # Border cell: a single wall with one rotation.
                unreal.EditorLevelLibrary.spawn_actor_from_object(wall_asset, position, unreal.Rotator(0, 0, cell.rotatew1))
            elif cell.deleteWall1Bottom:
                # Bottom wall carved away: draw the right wall only.
                unreal.EditorLevelLibrary.spawn_actor_from_object(wall_asset, position, unreal.Rotator(0, 0, cell.rotatew2))
            elif cell.deleteWall2Right:
                # Right wall carved away: draw the bottom wall only.
                unreal.EditorLevelLibrary.spawn_actor_from_object(wall_asset, position, unreal.Rotator(0, 0, cell.rotatew1))
            else:
                # Untouched interior cell: draw both walls.
                unreal.EditorLevelLibrary.spawn_actor_from_object(wall_asset, position, unreal.Rotator(0, 0, cell.rotatew1))
                unreal.EditorLevelLibrary.spawn_actor_from_object(wall_asset, position, unreal.Rotator(0, 0, cell.rotatew2))
# --- Script entry point: build, solve, randomise and draw a 10x10 maze ----
row, col = 10,10
maze = generate_maze(row,col)
# Start just inside the border; end at the far corner cell.
start = maze[1][2]
end = maze[9][9]
maze,solution_path = DFS(maze,start,end)
# Carve the walls along the found path, then add random extra openings.
parse_solution(solution_path, maze)
parse_randomness(maze,solution_path)
draw_maze(maze,row,col, start, end)
|
import unreal
# Handle returned by register_slate_pre_tick_callback; read by the callback
# below so it can unregister itself once the asset registry is ready.
tickhandle = None
def testRegistry(deltaTime):
    """Slate pre-tick callback: poll the asset registry every tick until it
    has finished loading assets, then unregister this callback."""
    unreal.log_warning("ticking.")
    asset_registry = unreal.AssetRegistryHelpers.get_asset_registry()
    if asset_registry.is_loading_assets():
        unreal.log_warning("still loading...")
    else:
        unreal.log_warning("ready!")
        # Reads the module-level handle assigned right after registration below.
        unreal.unregister_slate_pre_tick_callback(tickhandle)
tickhandle = unreal.register_slate_pre_tick_callback(testRegistry)
|
# -*- coding: utf-8 -*-
"""
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
__author__ = "timmyliang"
__email__ = "[email protected]"
__date__ = "2021-08-09 15:36:57"
import unreal
# Expect exactly one asset (a Skeleton) selected in the Content Browser;
# the 1-tuple unpack raises ValueError when the selection count differs.
(skeleton,) = unreal.EditorUtilityLibrary.get_selected_assets()
# Dump every bone node of the skeleton's bone tree.
for bone in skeleton.get_editor_property('bone_tree'):
    print(bone)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 13 11:46:18 2024
@author: WillQuantique
"""
import unreal
import time
import random as rd
import sys
sys.path.append("D:/Lab Project/UE_5.3/project/")
import numpy as np
def reset_scene(sequence_path):
    """Delete *sequence_path* from the asset library and clear scene actors.

    Removes every level actor whose name contains "F", "H" or "Rig", plus
    all CineCameraActors.

    NOTE(review): the single-letter substring match is very broad and will
    also delete unrelated actors (e.g. anything with an "F" in its name) --
    confirm this is intended.

    Args:
        sequence_path (str): Asset-library path of the sequence to delete.

    Returns:
        None
    """
    # Remove the sequence asset if it exists.
    sequence_asset = unreal.EditorAssetLibrary.load_asset(sequence_path)
    if sequence_asset:
        unreal.EditorAssetLibrary.delete_asset(sequence_path)
        print(f"Sequence '{sequence_path}' deleted from the asset library.")
    else:
        print(f"Sequence '{sequence_path}' not found in the asset library.")
    # Destroy matching actors in the current level.
    for actor in unreal.EditorLevelLibrary.get_all_level_actors():
        name = actor.get_name()
        unreal.log(name)
        if "F" in name or "Rig" in name or actor.get_class().get_name() == "CineCameraActor" or "H" in name:
            unreal.EditorLevelLibrary.destroy_actor(actor)
            print(f"Deleted actor: {name}")
def SetupCineCameraActor(location=unreal.Vector(0, 0, 0), rotation=unreal.Rotator(0, 0, 0)):
    # adapted from https://github.com/project/.py
    """Spawn a CineCameraActor set to manual focus.

    Parameters
    ----------
    location : unreal.Vector
        World-space spawn position. The default is unreal.Vector(0, 0, 0).
    rotation : unreal.Rotator
        Spawn rotation. The default is unreal.Rotator(0, 0, 0).

    Returns
    -------
    camera_actor : unreal.CineCameraActor
        The spawned camera with manual-focus settings applied.
    """
    world = unreal.EditorLevelLibrary.get_editor_world()
    camera_actor = unreal.EditorLevelLibrary.spawn_actor_from_class(
        unreal.CineCameraActor, location, rotation)
    # Switch the cine camera component to manual focus.
    focus = unreal.CameraFocusSettings()
    focus.focus_method = unreal.CameraFocusMethod.MANUAL
    camera_actor.get_cine_camera_component().set_editor_property("focus_settings", focus)
    return camera_actor
def CreateSequence(sequence_name, camera, length_frames=1, package_path='/project/'):
    '''
    Create a LevelSequence asset, bind the level's CineCameraActor (and every
    other level actor) to it, and pre-create camera-cut, focal-length,
    transform and focus-distance tracks for later keying.

    Args:
        sequence_name(str): The name of the sequence to be created
        camera: fallback camera actor. NOTE(review): it is overwritten by the
            first CineCameraActor found in the level, so the argument is
            effectively ignored whenever such an actor exists.
        length_frames(int): The number of frames in the camera cut section.
        package_path(str): The location for the new sequence
    Returns:
        unreal.LevelSequence: The created level sequence
    '''
    all_actors = unreal.EditorLevelLibrary.get_all_level_actors()
    # Create the sequence asset in the desired location
    sequence = unreal.AssetToolsHelpers.get_asset_tools().create_asset(
        sequence_name,
        package_path,
        unreal.LevelSequence,
        unreal.LevelSequenceFactoryNew())
    sequence.set_playback_end(length_frames)
    movie_scene = sequence.get_movie_scene()  # NOTE(review): unused afterwards
    # Prefer the CineCameraActor that already exists in the level.
    for actor in all_actors:
        unreal.log(actor.get_class().get_name())
        if actor.get_class().get_name() == "CineCameraActor":
            camera = actor
            break
    cam_binding = sequence.add_possessable(camera)
    # Bind every level actor as a possessable.
    # NOTE(review): this also re-binds the camera found above -- confirm intended.
    for actor in all_actors:
        actor_binding = sequence.add_possessable(actor)
    # Square 16x16 filmback and no motion blur, for reproducible renders.
    ccc = camera.get_cine_camera_component()
    filmback = unreal.CameraFilmbackSettings()
    filmback.sensor_width = 16
    filmback.sensor_height = 16
    ccc.set_editor_property("filmback", filmback)
    pp_settings = ccc.get_editor_property('post_process_settings')
    pp_settings.motion_blur_amount = 0
    ccc.set_editor_property('post_process_settings', pp_settings)
    camera_cut_track = sequence.add_master_track(unreal.MovieSceneCameraCutTrack)
    # Add a camera cut track for this camera
    # Make sure the camera cut is stretched to the -1 mark
    camera_cut_section = camera_cut_track.add_section()
    camera_cut_section.set_start_frame(-1)
    camera_cut_section.set_end_frame(length_frames)
    # bind the camera
    camera_binding_id = unreal.MovieSceneObjectBindingID()
    camera_binding_id.set_editor_property("Guid", cam_binding.get_id())
    camera_cut_section.set_editor_property("CameraBindingID", camera_binding_id)
    # Add a current focal length track to the cine camera component
    camera_component = camera.get_cine_camera_component()
    camera_component_binding = sequence.add_possessable(camera_component)
    camera_component_binding.set_parent(cam_binding)
    focal_length_track = camera_component_binding.add_track(unreal.MovieSceneFloatTrack)
    focal_length_track.set_property_name_and_path('CurrentFocalLength', 'CurrentFocalLength')
    focal_length_section = focal_length_track.add_section()
    focal_length_section.set_start_frame_bounded(0)
    focal_length_section.set_end_frame_bounded(length_frames)
    # add a transform track for later manipulation
    transform_track = cam_binding.add_track(unreal.MovieScene3DTransformTrack)
    transform_track.set_property_name_and_path('Transform', 'Transform')
    transform_section = transform_track.add_section()
    transform_section.set_start_frame_bounded(0)
    transform_section.set_end_frame_bounded(length_frames)
    # add track for focus distance
    # Add a track for the manual focus distance
    focus_distance_track = camera_component_binding.add_track(unreal.MovieSceneFloatTrack)
    focus_distance_track.set_property_name_and_path("ManualFocusDistance", "FocusSettings.ManualFocusDistance")
    focus_distance_section = focus_distance_track.add_section()
    focus_distance_section.set_start_frame_bounded(0)
    focus_distance_section.set_end_frame_bounded(length_frames)
    return sequence
def add_metahuman_components_to_sequence(sequence, metahuman_actor, sequence_path):
    """Open *sequence_path* in the Sequencer and add face/body control-rig
    tracks for the MetaHuman's skeletal mesh components.

    Args:
        sequence: LevelSequence object that receives possessables/tracks.
        metahuman_actor: actor whose SkeletalMeshComponents are scanned; ones
            named like "face"/"body" get the matching control rig assigned.
        sequence_path: asset path of the sequence to open in the editor.
    """
    # Retrieve all components attached to the metahuman actor
    unreal.log("before world")
    editor_subsystem = unreal.get_editor_subsystem(unreal.UnrealEditorSubsystem)
    sequence_path = ensure_package_name_format(sequence_path)
    sequence_to_activate = unreal.load_asset(sequence_path)
    print("under_world")
    s = unreal.LevelSequenceEditorBlueprintLibrary.open_level_sequence(sequence_to_activate)
    print("overworld")
    # Use the new recommended method to get the editor world
    world = editor_subsystem.get_editor_world()
    unreal.log("world")
    skeletal_mesh_components = metahuman_actor.get_components_by_class(unreal.SkeletalMeshComponent)
    unreal.log("mesh")
    # sequence = unreal.LevelSequenceEditorBlueprintLibrary.get_current_level_sequence()
    unreal.log("sequence")
    for skeletal_mesh_component in skeletal_mesh_components:
        # Assuming naming convention to identify relevant skeletal meshes for control rigs
        if "face" in skeletal_mesh_component.get_name().lower():
            rig = unreal.load_asset('/project/')
            skel = sequence.add_possessable(skeletal_mesh_component)
            rig_class = rig.get_control_rig_class()
            rig_track = unreal.ControlRigSequencerLibrary.find_or_create_control_rig_track(world, sequence, rig_class,
                                                                                           skel)
        elif "body" in skeletal_mesh_component.get_name().lower():
            rig = unreal.load_asset("/project/")
            skel = sequence.add_possessable(skeletal_mesh_component)
            rig_class = rig.get_control_rig_class()
            rig_track = unreal.ControlRigSequencerLibrary.find_or_create_control_rig_track(world, sequence, rig_class,
                                                                                           skel)
def find_delta_z(rig, sequence):
    """Return the vertical offset between the reference jaw height and the
    jaw control's world Z at frame 0 of *sequence*."""
    # 142.458249: reference jaw height used across conditions -- presumably
    # the neutral MetaHuman jaw Z; TODO confirm.
    jaw_transform = unreal.ControlRigSequencerLibrary.get_control_rig_world_transform(
        sequence, rig, "CTRL_C_jaw", unreal.FrameNumber(0))
    return 142.458249 - jaw_transform.translation.z
def LoadBlueprint(blueprint_path: str, loc: unreal.Vector = unreal.Vector(0, 0, 0),
                  rot: unreal.Rotator = unreal.Rotator(0, 0, 0), size="n"):
    """
    Spawn an actor from a blueprint asset, lowering tall characters.

    Parameters
    ----------
    blueprint_path : str
        Unreal path to the blue print eg. Game/project/
    loc : unreal.Vector, optional
        Desired Position in absolute coordinates The default is unreal.Vector(0,0, 0).
    rot : unreal.Rotator, optional
        Desired Rotation The default is unreal.Rotator(0, 0, 0).
    size : str, optional
        Size category ("n" normal, "g" big, "tg" very big); big characters
        are spawned lower so their faces line up with the camera.

    Returns
    -------
    actor : TYPE
        Actor as defined by the blue print, or None when the asset fails to load.
    """
    asset = unreal.EditorAssetLibrary.load_asset(blueprint_path)
    if asset is None:
        print(f"Failed to load asset at {blueprint_path}")
        return None
    # BUGFIX: compute the adjusted spawn location in a copy instead of
    # mutating the caller's vector in place -- the original also mutated the
    # shared mutable default argument across calls.
    z_offset = {"g": -10, "tg": -19.32}.get(size, 0)
    spawn_loc = unreal.Vector(loc.x, loc.y, loc.z + z_offset)
    actor = unreal.EditorLevelLibrary.spawn_actor_from_object(asset, spawn_loc, rot)
    return actor
def MovieQueueRender(u_level_file, u_level_seq_file, u_preset_file, job_name: str, sequence=None):
    """
    Queue a movie pipeline job and render it with the PIE executor.

    Parameters
    ----------
    u_level_file : Unreal path
        Path to level
    u_level_seq_file : Unreal path
        Path to sequence
    u_preset_file : Unreal path
        Path to movie render presets
    job_name : str
        Display name for the queued job.
    sequence : optional
        Level sequence used to refresh the job's shot list before rendering.

    Returns
    -------
    None.
    """
    queue_subsystem = unreal.get_editor_subsystem(unreal.MoviePipelineQueueSubsystem)
    pie_executor = unreal.MoviePipelinePIEExecutor()
    render_queue = queue_subsystem.get_queue()
    # queue.delete_all_jobs()
    # Configure the render job from the preset asset.
    job = render_queue.allocate_new_job(unreal.MoviePipelineExecutorJob)
    job.job_name = job_name
    job.map = unreal.SoftObjectPath(u_level_file)
    job.sequence = unreal.SoftObjectPath(u_level_seq_file)
    preset = unreal.EditorAssetLibrary.find_asset_data(u_preset_file).get_asset()
    job.set_configuration(preset)
    if sequence is not None:
        print(unreal.MoviePipelineLibrary.update_job_shot_list_from_sequence(sequence, job))
    queue_subsystem.render_queue_with_executor_instance(pie_executor)
    print("###################################################\n")
    print("rendered")
    print("###################################################\n")
def get_file_paths(folder_path, limit=0):
    """Return the paths of assets under *folder_path* that actually exist.

    Args:
        folder_path: content folder to scan.
        limit: stop after scanning this many entries (0 = scan everything).

    Returns:
        list[str]: existing asset paths, in listing order.
    """
    file_paths = []
    # List every asset path under the folder.
    asset_paths = unreal.EditorAssetLibrary.list_assets(folder_path)
    scanned = 0
    # Keep only paths whose asset exists, honoring the scan limit.
    for asset_path in asset_paths:
        scanned += 1
        if unreal.EditorAssetLibrary.does_asset_exist(asset_path):
            file_paths.append(asset_path)
        if scanned == limit:
            break
    return file_paths
def ensure_package_name_format(object_path):
    """Convert an object path to a package name when necessary.

    Unreal Engine requires package names (no trailing ``.ObjectName``) instead
    of object paths for certain operations.

    Args:
        object_path (str): The original object path.

    Returns:
        str: The corrected package name, or the input unchanged when it is
        not a /Game/ object path containing a dot.
    """
    is_game_object_path = object_path.startswith('/Game/') and '.' in object_path
    # Strip everything after the last dot to obtain the package name.
    return object_path.rsplit('.', 1)[0] if is_game_object_path else object_path
def control_by_control(pose_asset_path, sequence, frame=1, size="n", delta=0):
    """Apply every (non-teeth) control transform from a pose asset to the
    face control rig of *sequence* at *frame*, shifted by *delta* meters on X.

    NOTE(review): the rotation is built by feeding quaternion x/y/z
    components straight into a Rotator ("from quat to rotator" below); that
    is not a correct quaternion->euler conversion -- confirm intended.

    Args:
        pose_asset_path: asset path of the control-rig pose to apply.
        sequence: level sequence whose control rigs are keyed.
        frame (int): frame number to key.
        size (str): size category ("n"/"g"/"p"/"tg"); currently all map to
            the unmodified translation (offsets are commented out).
        delta: X offset in meters (multiplied by 100 to Unreal cm).
    """
    print("Control by control ongoing")
    pose_asset_path = ensure_package_name_format(pose_asset_path)
    pose_asset = unreal.EditorAssetLibrary.load_asset(pose_asset_path)
    print(pose_asset)
    rigProxies = unreal.ControlRigSequencerLibrary.get_control_rigs(sequence)
    # Filter rigs for specific names
    rigs = [rigProxy.control_rig for rigProxy in rigProxies if
            rigProxy.control_rig.get_name() in ["Face_ControlBoard_CtrlRig"]]
    # NOTE(review): raises IndexError when no face rig is present.
    delta_z = find_delta_z(rigs[0], sequence)
    print(f"###############{delta_z}#######################")
    # Prepare all transformations in a list comprehension
    transformations = [
        (
            rig,
            control.name,
            unreal.FrameNumber(frame),
            unreal.Transform(
                location={
                    "n": control.global_transform.translation,
                    "g": control.global_transform.translation,  # + unreal.Vector(0, 0, 0),
                    "p": control.global_transform.translation,  # - unreal.Vector(0, 0, delta_z/2),
                    "tg": control.global_transform.translation,  # + unreal.Vector(0, 0, delta_z)
                }[size],
                rotation=unreal.Rotator(control.local_transform.rotation.x, control.local_transform.rotation.y,
                                        control.local_transform.rotation.z),  # from quat to rotator
                scale=control.global_transform.scale3d
            )
        )
        for rig in rigs
        for control in pose_asset.pose.copy_of_controls
        if "teeth" not in str(control.name)
    ]
    # Debug: sample control #15 for the prints/lookups below.
    name = [control.name for control in pose_asset.pose.copy_of_controls][15]
    print([control.global_transform for control in pose_asset.pose.copy_of_controls][15])
    # Apply transformations
    for rig, control_name, frame_number, control_transform in transformations:
        control_transform.translation.x += delta * 100
        unreal.ControlRigSequencerLibrary.set_control_rig_world_transform(sequence, rig, control_name, frame_number,
                                                                          control_transform)
        print(unreal.ControlRigSequencerLibrary.get_control_rig_world_transform(sequence, rig, name, frame_number))
    # Debug: look for a track named after the sampled control.
    for binding in sequence.get_bindings():
        for track in binding.get_tracks():
            print(track.get_display_name())
            if track.get_display_name() == name:
                print(f"bingo{name} ")
                print(track)
def add_key_to_position_track(sequence, transform, frame):
    """Key the Location/Rotation channels of the "Transform" track at *frame*.

    NOTE(review): if no binding carries a "Transform" track,
    ``transform_section`` is unbound and a NameError is raised below; the
    inner ``break`` also only exits the track loop, so when several bindings
    have such a track the LAST matching binding wins.

    NOTE(review): ``transform.rotation`` is a quaternion whose x/y/z
    components are written directly into the Rotation.* channels -- confirm
    that conversion is intended.
    """
    for binding in sequence.get_bindings():
        for track in binding.get_tracks():
            if track.get_display_name() == "Transform":
                transform_section = track.get_sections()[0]
                break
    time = unreal.FrameNumber(frame)
    all_channels = transform_section.get_all_channels()
    # Map channel names to the values to key.
    transform_dict = {
        "Location.X": transform.translation.x,
        "Location.Y": transform.translation.y,
        "Location.Z": transform.translation.z,
        "Rotation.X": transform.rotation.x,
        "Rotation.Y": transform.rotation.y,
        "Rotation.Z": transform.rotation.z
    }
    for channel in all_channels:
        if str(channel.channel_name) in transform_dict:
            value = transform_dict[str(channel.channel_name)]
            channel.add_key(time, value)
def add_key_to_focal_track(sequence, foc, frame):
    """Key the 'CurrentFocalLength' track's channels to *foc* at *frame*.

    NOTE(review): if no binding carries such a track, ``transform_section``
    is unbound and a NameError is raised; the ``break`` only exits the inner
    loop, so the last matching binding wins.
    """
    for binding in sequence.get_bindings():
        for track in binding.get_tracks():
            if track.get_display_name() == 'CurrentFocalLength':
                transform_section = track.get_sections()[0]
                break
    time = unreal.FrameNumber(frame)
    all_channels = transform_section.get_all_channels()
    # Float track: write the same value to every channel.
    for channel in all_channels:
        channel.add_key(time, foc)
def add_key_to_focus_track(sequence, foc, frame):
    """Key the 'ManualFocusDistance' track's channels to *foc* at *frame*.

    NOTE(review): same caveats as add_key_to_focal_track -- unbound
    ``transform_section`` when no track matches; last matching binding wins.
    """
    for binding in sequence.get_bindings():
        for track in binding.get_tracks():
            if track.get_display_name() == "ManualFocusDistance":
                transform_section = track.get_sections()[0]
                break
    time = unreal.FrameNumber(frame)
    all_channels = transform_section.get_all_channels()
    # Float track: write the same value to every channel.
    for channel in all_channels:
        channel.add_key(time, foc)
def make_pos_rot_from_angle_and_foc(focal, focus, angle_y, angle_z):
    """Build a camera condition dict (position, rotation, focal, focus).

    The camera distance is 2.6 * focal: with this sensor size, that keeps the
    face filling the frame regardless of focal length.
    """
    dist = 2.6 * focal
    cam_angle_y, cam_angle_z, cam_x, cam_y, cam_z = camera_position_angles(angle_y, angle_z, dist)
    return {
        "pos": (cam_x, cam_y, cam_z),
        "rot": (0, cam_angle_y, cam_angle_z, 0),
        "focal": focal,
        "focus": dist + focus,
    }
def camera_position_angles(angle_y, angle_z, distance):
    """Convert spherical angles (degrees) plus a distance to camera coords.

    Returns (angle_y, angle_z, x, y, z): the input angles unchanged plus the
    camera position, with X/Y negated (camera placed in front of the subject)
    and Z measured down from the 149 reference height.
    """
    # Z rotation sweeps the horizontal (X-Y) plane; Y rotation the vertical.
    z_rad = np.radians(angle_z)
    y_rad = np.radians(angle_y)
    horizontal = distance * np.cos(y_rad)
    x = horizontal * np.cos(z_rad)
    y = horizontal * np.sin(z_rad)
    z = distance * np.sin(y_rad)
    # Positive Y-angle means looking up, so Z drops below the 149 reference.
    return angle_y, angle_z, -x, -y, 149 - z
# Per-MetaHuman size category used to offset camera/actor heights.
# Values appear to be French size codes: "p" = petit (small), "g" = grand
# (tall), "tg" = tres grand (very tall) -- TODO confirm the lone "sg" entry.
grand_petit = {
    "FA003": "p",
    "FA004": "p",
    "FA006": "p",
    "FA008": "p",
    "FA015": "p",
    "FA016": "p",
    "FB001": "sg",
    "FB007": "p",
    "FB014": "p",
    # nul
    "HA001": "tg",
    "HA002": "tg",
    "HA003": "tg",
    "HA004": "tg",
    "HA005": "tg",
    "HA006": "tg",
    "HA007": "tg",
    # nul
    "HA008": "g",
    "HA010": "g",
    "HA011": "g",
    "HA012": "g",
    "HA013": "g",
    "HA014": "g",
    "HA015": "g",
    "HA016": "g",
    "HA017": "g",
    "HB001": "tg",
    "HB002": "g",
    "HB003": "g",
    "HB004": "g",
    "HB005": "g",
    "HB006": "g",
    "HB007": "g",
    "HB008": "g",
    "HB009": "g",
    "HB010": "g",
    "HB011": "g",
    "HB012": "g",
    "HB013": "g",
    "HB014": "g",
    "HB015": "g",
    "HB016": "g",
    "HN001": "tg",
    "HN002": "g",
    "HN003": "tg",
    "HN004": "g",
    "HN006": "g",
    "HN007": "tg",
    "HN008": "g",
    "HN009": "g",
    "HN010": "g",
    "HN011": "g",
    "HN013": "tg",
    "HN014": "tg",
    "HN015": "g",
    "HN016": "g",
    "HN017": "g",
}
def set_cam_condition(sequence, con_dic, frame, size="n", delta=0):
    """Key the camera transform, focal length and focus for one condition.

    Places the camera at the right place at the right time with the right
    orientation, and keys focal length and focus distance as well.

    Args:
        sequence: sequence over which we want to operate.
        con_dic: condition dictionary with "pos", "rot", "focal", "focus".
        frame: frame number to key.
        size: size category ("n"/"p"/"g"/"tg"); small/tall characters get a
            vertical offset so faces stay framed.
        delta: X offset in meters (multiplied by 100 to Unreal cm).
    """
    # Build the spatial transform from the condition dictionary.
    px, py, pz = con_dic["pos"]
    qx, qy, qz, qw = con_dic["rot"]
    transform = unreal.Transform()
    transform.translation = unreal.Vector(px, py, pz)
    transform.rotation = unreal.Quat(qx, qy, qz, qw)
    # Vertical compensation for small ("p") and tall ("g") characters.
    if size == "p":
        transform.translation.z -= 8.234
    elif size == "g":
        transform.translation.z += 9.468
    transform.translation.x += delta * 100
    add_key_to_position_track(sequence, transform, frame)
    add_key_to_focal_track(sequence, con_dic["focal"], frame)
    add_key_to_focus_track(sequence, con_dic["focus"], frame)
def get_size_from_identity(iden, grand_petit):
    """Look up the size category for a MetaHuman, defaulting to normal.

    Args:
        iden: MetaHuman identifier, e.g. "HA003".
        grand_petit: mapping of identifier -> size code.

    Returns:
        str: the size code for *iden* ("p", "g", "tg", ...), or "n" (normal)
        when the identifier is unknown.
    """
    # dict.get() replaces the membership-test-then-index pattern.
    return grand_petit.get(iden, "n")
def chose_nth(liste, n=None):
    """Randomly sample len(liste)/n items (with replacement), sorted.

    With ``n=None`` the divider defaults to ``len(liste)``, i.e. a single
    sample is returned.

    Args:
        liste: population to sample from.
        n: sample-size divider; larger n means fewer samples.

    Returns:
        Sorted list of sampled items. Duplicates are possible because
        random.choices samples WITH replacement.
    """
    # Idiom fix: compare against None with "is", not "==".
    if n is None:
        n = len(liste)
    return sorted(rd.choices(liste, k=int(len(liste) / n)))
def full_run(iden, n, test=False, delta=0):
    """Create and key a full sequence of conditions for one metahuman.

    Parameters
    ----------
    iden : the name of one metahuman (e.g. "HB001")
    n : condition divider; each condition list is sampled down to roughly
        len(list) / n values (see chose_nth)
    test : when True, skip the initial neutral-expression conditions
    delta : distance from the origin along the x axis (row index of the
        character)

    Side effects: spawns the camera and the metahuman blueprint, creates a
    level sequence named "sequence_<iden>" and keys one camera/expression
    condition per frame.
    -------
    """
    # create the camera
    camera = SetupCineCameraActor(unreal.Vector(0, 80, 149), unreal.Rotator(0, 0, -90))
    # load all the emotion asset paths
    pose_asset_folder = "/project/"
    all_pose_asset_path = get_file_paths(pose_asset_folder)
    # initiate the basic condition lists
    focals = [12, 18, 24, 50, 70]
    focuses = [i for i in range(-100, 100, 10)]
    z_angles = [i for i in range(-150, -40, 5)]
    y_angles = [i for i in range(-30, 30, 5)]
    con_dict = {}
    # this stores the initial condition, the one every character will go through
    initial_pose = pose_asset_folder + "Joy-01_Satisfaction"  # the most neutral expression
    initial_cond = []
    # but only if not in test
    if test == False:
        initial_cond = [
            {"emo": initial_pose, "cam": make_pos_rot_from_angle_and_foc(50, 0, y, z)}
            for z in z_angles[5:-5]
            for y in y_angles[5:-5]
        ]
    # add all the other conditions
    initial_cond += [
        {"emo": a, "cam": make_pos_rot_from_angle_and_foc(f, foc, y, z)}
        for f in chose_nth(focals, n)
        for foc in chose_nth(focuses, n)
        for z in chose_nth(z_angles, n)
        for y in chose_nth(y_angles, n)
        for a in chose_nth(all_pose_asset_path, n)
    ]
    # the dictionary of conditions for our metahuman
    con_dict[iden] = initial_cond
    # this is important so that add_metahuman_components_to_sequence knows where to look for sequences
    u_level_seq_file = f"/project/"
    # loop through the dictionary
    for k, v in con_dict.items():
        path = f"/project/{iden}/BP_{iden}"  # define path of the metahuman
        size = get_size_from_identity(iden, grand_petit)
        MH = LoadBlueprint(path, unreal.Vector(delta * 100, 0, 0),
                           size=size)  # place it in a row aligned with its friends (see enumerate loop in generate)
        length = len(v)  # this tells CreateSequence the length of the sequence it should create
        sequ_name = f"sequence_{iden}"  # name of the sequence to be created
        sequence = CreateSequence(sequ_name, camera, length)  # finally create the sequence
        add_metahuman_components_to_sequence(sequence, MH,
                                             u_level_seq_file + sequ_name)  # this creates an animation track for each driver of the metahuman
        print(r"\n\n############ thank you for your attention ##########")
        # loop through the conditions and, using their index, assign each to the nth frame
        for cnt, e in enumerate(v):
            set_cam_condition(sequence, e["cam"], cnt, size, delta)
            control_by_control(e["emo"], sequence, cnt, size, delta)
def start_over():
    """Clean up the project file.

    Wipes out all existing sequences and frees up the rendering queue.
    Useful for making several batches in a row.
    """
    # Find and delete all existing sequences.
    all_sequence_path = get_file_paths("/project/")
    for seq_path in all_sequence_path:
        reset_scene(seq_path)
    # Wipe the movie-render queue. (An unused MoviePipelinePIEExecutor was
    # previously constructed here; it was never referenced and was removed.)
    subsystem = unreal.get_editor_subsystem(unreal.MoviePipelineQueueSubsystem)
    queue = subsystem.get_queue()
    queue.delete_all_jobs()
def generate(b: int, e: int, n: int, test: bool):
    """
    Parameters
    ----------
    b : first index of the identities to be generated
    e : last index of the identities to be generated
    n : None or int, number by which the amount of unique conditions will be
        divided; if None, generates 1 value per condition
    test : if True, skips the initial conditions; used in conjunction with
        n=None it allows generating only one picture for testing purposes
    Returns Nothing
    This function builds a list of all the possible names the metahumans can
    have, loops through a portion of them (as defined by b and e) and creates
    a sequence for each of them.
    See full_run for more details
    -------
    """
    # Identity names are <gender><race><3-digit id>, e.g. "FA001".
    genders = ["F", "H"]
    races = ["A", "B", "N"]
    id_nb = [f"{i + 1:03}" for i in range(17)]
    identities = [g + r + ids for g in genders for r in races for ids in id_nb]
    # The enumerate index doubles as the row offset (delta) for each character.
    for i, iden in enumerate(identities[b:e]):
        full_run(iden, n, test, i)
def render_everything():
    """Queue a render job for every sequence found under the project folder."""
    u_level_file = "/project/"
    all_sequence_path = get_file_paths("/project/")
    # The enumerate index was unused, so iterate the paths directly.
    for seq_path in all_sequence_path:
        # The last 5 characters of the path are used as the job/output name.
        MovieQueueRender(u_level_file, seq_path, "/project/", seq_path[-5:])
if __name__ == "__main__":
    start = time.time()
    # Wipe previous sequences and the render queue, generate a batch of
    # identities, then queue everything for rendering.
    start_over()
    generate(19, 21, 4, False)
    render_everything()
    end = time.time()
    # The separators previously used raw strings (r"\n\n"), which printed the
    # literal characters backslash-n instead of blank lines; use real newlines.
    print("\n\n############ pay close attention here!!!!! ##########\n\n")
    # Fixed mojibake: the message is "temps écoulé" (elapsed time).
    print(f"temps écoulé : {end - start}s")
    print("\n\n############ thank you for your attention ###########")
|
# Copyright (c) <2021> Side Effects Software Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The name of Side Effects Software may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY SIDE EFFECTS SOFTWARE "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL SIDE EFFECTS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unreal
class ProcessHDA(object):
    """ An object that wraps async processing of an HDA (instantiating,
    cooking/processing an HDA), with functions that are called at the
    various stages of the process, that can be overridden by subclasses for
    custom functionality:
        - on_failure()
        - on_complete(): upon successful completion (could be PostInstantiation
          if auto cook is disabled, PostProcessing if auto bake is disabled, or
          after PostAutoBake if auto bake is enabled.
        - on_pre_instantiation(): before the HDA is instantiated, a good place
          to set parameter values before the first cook.
        - on_post_instantiation(): after the HDA is instantiated, a good place
          to set/configure inputs before the first cook.
        - on_post_auto_cook(): right after a cook
        - on_pre_process(): after a cook but before output objects have been
          created/processed
        - on_post_processing(): after output objects have been created
        - on_post_auto_bake(): after outputs have been baked
    Instantiate the processor via the constructor and then call the activate()
    function to start the asynchronous process.
    """

    def __init__(
            self,
            houdini_asset,
            instantiate_at=unreal.Transform(),
            parameters=None,
            node_inputs=None,
            parameter_inputs=None,
            world_context_object=None,
            spawn_in_level_override=None,
            enable_auto_cook=True,
            enable_auto_bake=False,
            bake_directory_path="",
            bake_method=unreal.HoudiniEngineBakeOption.TO_ACTOR,
            remove_output_after_bake=False,
            recenter_baked_actors=False,
            replace_previous_bake=False,
            delete_instantiated_asset_on_completion_or_failure=False):
        """ Instantiates an HDA in the specified world/level. Sets parameters
        and inputs supplied in InParameters, InNodeInputs and parameter_inputs.
        If bInEnableAutoCook is true, cooks the HDA. If bInEnableAutoBake is
        true, bakes the cooked outputs according to the supplied baking
        parameters.
        This all happens asynchronously, with the various output pins firing at
        the various points in the process:
            - PreInstantiation: before the HDA is instantiated, a good place
              to set parameter values before the first cook (parameter values
              from ``parameters`` are automatically applied at this point)
            - PostInstantiation: after the HDA is instantiated, a good place
              to set/configure inputs before the first cook (inputs from
              ``node_inputs`` and ``parameter_inputs`` are automatically applied
              at this point)
            - PostAutoCook: right after a cook
            - PreProcess: after a cook but before output objects have been
              created/processed
            - PostProcessing: after output objects have been created
            - PostAutoBake: after outputs have been baked
            - Completed: upon successful completion (could be PostInstantiation
              if auto cook is disabled, PostProcessing if auto bake is disabled,
              or after PostAutoBake if auto bake is enabled).
            - Failed: If the process failed at any point.
        Args:
            houdini_asset (HoudiniAsset): The HDA to instantiate.
            instantiate_at (Transform): The Transform to instantiate the HDA with.
            parameters (Map(Name, HoudiniParameterTuple)): The parameters to set before cooking the instantiated HDA.
            node_inputs (Map(int32, HoudiniPublicAPIInput)): The node inputs to set before cooking the instantiated HDA.
            parameter_inputs (Map(Name, HoudiniPublicAPIInput)): The parameter-based inputs to set before cooking the instantiated HDA.
            world_context_object (Object): A world context object for identifying the world to spawn in, if spawn_in_level_override is null.
            spawn_in_level_override (Level): If not nullptr, then the HoudiniAssetActor is spawned in that level. If both spawn_in_level_override and world_context_object are null, then the actor is spawned in the current editor context world's current level.
            enable_auto_cook (bool): If true (the default) the HDA will cook automatically after instantiation and after parameter, transform and input changes.
            enable_auto_bake (bool): If true, the HDA output is automatically baked after a cook. Defaults to false.
            bake_directory_path (str): The directory to bake to if the bake path is not set via attributes on the HDA output.
            bake_method (HoudiniEngineBakeOption): The bake target (to actor vs blueprint). @see HoudiniEngineBakeOption.
            remove_output_after_bake (bool): If true, HDA temporary outputs are removed after a bake. Defaults to false.
            recenter_baked_actors (bool): Recenter the baked actors to their bounding box center. Defaults to false.
            replace_previous_bake (bool): If true, on every bake replace the previous bake's output (assets + actors) with the new bake's output. Defaults to false.
            delete_instantiated_asset_on_completion_or_failure (bool): If true, deletes the instantiated asset actor on completion or failure. Defaults to false.
        """
        super(ProcessHDA, self).__init__()
        self._houdini_asset = houdini_asset
        self._instantiate_at = instantiate_at
        self._parameters = parameters
        self._node_inputs = node_inputs
        self._parameter_inputs = parameter_inputs
        self._world_context_object = world_context_object
        self._spawn_in_level_override = spawn_in_level_override
        self._enable_auto_cook = enable_auto_cook
        self._enable_auto_bake = enable_auto_bake
        self._bake_directory_path = bake_directory_path
        self._bake_method = bake_method
        self._remove_output_after_bake = remove_output_after_bake
        self._recenter_baked_actors = recenter_baked_actors
        self._replace_previous_bake = replace_previous_bake
        self._delete_instantiated_asset_on_completion_or_failure = delete_instantiated_asset_on_completion_or_failure
        # Set by activate(); tracks the wrapped asset and process state.
        self._asset_wrapper = None
        self._cook_success = False
        self._bake_success = False

    @property
    def asset_wrapper(self):
        """ The asset wrapper for the instantiated HDA processed by this node. """
        return self._asset_wrapper

    @property
    def cook_success(self):
        """ True if the last cook was successful. """
        return self._cook_success

    @property
    def bake_success(self):
        """ True if the last bake was successful. """
        return self._bake_success

    @property
    def houdini_asset(self):
        """ The HDA to instantiate. """
        return self._houdini_asset

    @property
    def instantiate_at(self):
        """ The transform to instantiate the asset with. """
        return self._instantiate_at

    @property
    def parameters(self):
        """ The parameters to set during on_pre_instantiation. """
        return self._parameters

    @property
    def node_inputs(self):
        """ The node inputs to set during on_post_instantiation. """
        return self._node_inputs

    @property
    def parameter_inputs(self):
        """ The object path parameter inputs to set during on_post_instantiation. """
        return self._parameter_inputs

    @property
    def world_context_object(self):
        """ The world context object: spawn in this world if spawn_in_level_override is not set. """
        return self._world_context_object

    @property
    def spawn_in_level_override(self):
        """ The level to spawn in. If both this and world_context_object is not set, spawn in the editor context's level. """
        return self._spawn_in_level_override

    @property
    def enable_auto_cook(self):
        """ Whether to set the instantiated asset to auto cook. """
        return self._enable_auto_cook

    @property
    def enable_auto_bake(self):
        """ Whether to set the instantiated asset to auto bake after a cook. """
        return self._enable_auto_bake

    @property
    def bake_directory_path(self):
        """ Set the fallback bake directory, for if output attributes do not specify it. """
        return self._bake_directory_path

    @property
    def bake_method(self):
        """ The bake method/target: for example, to actors vs to blueprints. """
        return self._bake_method

    @property
    def remove_output_after_bake(self):
        """ Remove temporary HDA output after a bake. """
        return self._remove_output_after_bake

    @property
    def recenter_baked_actors(self):
        """ Recenter the baked actors at their bounding box center. """
        return self._recenter_baked_actors

    @property
    def replace_previous_bake(self):
        """ Replace previous bake output on each bake. For the purposes of this
        node, this would mostly apply to .uassets and not actors.
        """
        return self._replace_previous_bake

    @property
    def delete_instantiated_asset_on_completion_or_failure(self):
        """ Whether or not to delete the instantiated asset after Complete is called. """
        return self._delete_instantiated_asset_on_completion_or_failure

    def activate(self):
        """ Activate the process. This will:
            - instantiate houdini_asset and wrap it as asset_wrapper
            - call on_failure() for any immediate failures
            - otherwise bind to delegates from asset_wrapper so that the
              various self.on_*() functions are called as appropriate
        Returns immediately (does not block until cooking/processing is
        complete).
        Returns:
            (bool): False if activation failed.
        """
        # Get the API instance
        houdini_api = unreal.HoudiniPublicAPIBlueprintLib.get_api()
        if not houdini_api:
            # Handle failures: this will unbind delegates and call on_failure()
            self._handle_on_failure()
            return False
        # Create an empty API asset wrapper
        self._asset_wrapper = unreal.HoudiniPublicAPIAssetWrapper.create_empty_wrapper(houdini_api)
        if not self._asset_wrapper:
            # Handle failures: this will unbind delegates and call on_failure()
            self._handle_on_failure()
            return False
        # Bind to the wrapper's delegates for instantiation, cooking, baking
        # etc events
        self._asset_wrapper.on_pre_instantiation_delegate.add_callable(
            self._handle_on_pre_instantiation)
        self._asset_wrapper.on_post_instantiation_delegate.add_callable(
            self._handle_on_post_instantiation)
        self._asset_wrapper.on_post_cook_delegate.add_callable(
            self._handle_on_post_auto_cook)
        self._asset_wrapper.on_pre_process_state_exited_delegate.add_callable(
            self._handle_on_pre_process)
        self._asset_wrapper.on_post_processing_delegate.add_callable(
            self._handle_on_post_processing)
        self._asset_wrapper.on_post_bake_delegate.add_callable(
            self._handle_on_post_auto_bake)
        # Begin the instantiation process of houdini_asset and wrap it with
        # self.asset_wrapper
        if not houdini_api.instantiate_asset_with_existing_wrapper(
                self.asset_wrapper,
                self.houdini_asset,
                self.instantiate_at,
                self.world_context_object,
                self.spawn_in_level_override,
                self.enable_auto_cook,
                self.enable_auto_bake,
                self.bake_directory_path,
                self.bake_method,
                self.remove_output_after_bake,
                self.recenter_baked_actors,
                self.replace_previous_bake):
            # Handle failures: this will unbind delegates and call on_failure()
            self._handle_on_failure()
            return False
        return True

    def _unbind_delegates(self):
        """ Unbinds from self.asset_wrapper's delegates (if valid). """
        if not self._asset_wrapper:
            return
        # BUG FIX: this previously called add_callable() on every delegate,
        # which bound each handler a second time instead of unbinding it.
        # remove_callable() actually detaches the callbacks.
        self._asset_wrapper.on_pre_instantiation_delegate.remove_callable(
            self._handle_on_pre_instantiation)
        self._asset_wrapper.on_post_instantiation_delegate.remove_callable(
            self._handle_on_post_instantiation)
        self._asset_wrapper.on_post_cook_delegate.remove_callable(
            self._handle_on_post_auto_cook)
        self._asset_wrapper.on_pre_process_state_exited_delegate.remove_callable(
            self._handle_on_pre_process)
        self._asset_wrapper.on_post_processing_delegate.remove_callable(
            self._handle_on_post_processing)
        self._asset_wrapper.on_post_bake_delegate.remove_callable(
            self._handle_on_post_auto_bake)

    def _check_wrapper(self, wrapper):
        """ Checks that wrapper matches self.asset_wrapper. Logs a warning if
        it does not.
        Args:
            wrapper (HoudiniPublicAPIAssetWrapper): the wrapper to check
                against self.asset_wrapper
        Returns:
            (bool): True if the wrappers match.
        """
        if wrapper != self._asset_wrapper:
            unreal.log_warning(
                '[UHoudiniPublicAPIProcessHDANode] Received delegate event '
                'from unexpected asset wrapper ({0} vs {1})!'.format(
                    self._asset_wrapper.get_name() if self._asset_wrapper else '',
                    wrapper.get_name() if wrapper else ''
                )
            )
            return False
        return True

    def _handle_on_failure(self):
        """ Handle any failures during the lifecycle of the process. Calls
        self.on_failure() and then unbinds from self.asset_wrapper and
        optionally deletes the instantiated asset.
        """
        self.on_failure()
        self._unbind_delegates()
        if self.delete_instantiated_asset_on_completion_or_failure and self.asset_wrapper:
            self.asset_wrapper.delete_instantiated_asset()

    def _handle_on_complete(self):
        """ Handles completion of the process. This can happen at one of
        three stages:
            - After on_post_instantiate(), if enable_auto_cook is False.
            - After on_post_auto_cook(), if enable_auto_cook is True but
              enable_auto_bake is False.
            - After on_post_auto_bake(), if both enable_auto_cook and
              enable_auto_bake are True.
        Calls self.on_complete() and then unbinds from self.asset_wrapper's
        delegates and optionally deletes the instantiated asset.
        """
        self.on_complete()
        self._unbind_delegates()
        if self.delete_instantiated_asset_on_completion_or_failure and self.asset_wrapper:
            self.asset_wrapper.delete_instantiated_asset()

    def _handle_on_pre_instantiation(self, wrapper):
        """ Called during pre_instantiation. Sets ``parameters`` on the HDA
        and calls self.on_pre_instantiation().
        """
        if not self._check_wrapper(wrapper):
            return
        # Set any parameters specified for the HDA
        if self.asset_wrapper and self.parameters:
            self.asset_wrapper.set_parameter_tuples(self.parameters)
        self.on_pre_instantiation()

    def _handle_on_post_instantiation(self, wrapper):
        """ Called during post_instantiation. Sets inputs (``node_inputs`` and
        ``parameter_inputs``) on the HDA and calls self.on_post_instantiation().
        Completes execution if enable_auto_cook is False.
        """
        if not self._check_wrapper(wrapper):
            return
        # Set any inputs specified when the node was created
        if self.asset_wrapper:
            if self.node_inputs:
                self.asset_wrapper.set_inputs_at_indices(self.node_inputs)
            if self.parameter_inputs:
                self.asset_wrapper.set_input_parameters(self.parameter_inputs)
        self.on_post_instantiation()
        # If not set to auto cook, complete execution now
        if not self.enable_auto_cook:
            self._handle_on_complete()

    def _handle_on_post_auto_cook(self, wrapper, cook_success):
        """ Called during post_cook. Sets self.cook_success and calls
        self.on_post_auto_cook().
        Args:
            cook_success (bool): True if the cook was successful.
        """
        if not self._check_wrapper(wrapper):
            return
        self._cook_success = cook_success
        self.on_post_auto_cook(cook_success)

    def _handle_on_pre_process(self, wrapper):
        """ Called during pre_process. Calls self.on_pre_process().
        """
        if not self._check_wrapper(wrapper):
            return
        self.on_pre_process()

    def _handle_on_post_processing(self, wrapper):
        """ Called during post_processing. Calls self.on_post_processing().
        Completes execution if enable_auto_bake is False.
        """
        if not self._check_wrapper(wrapper):
            return
        self.on_post_processing()
        # If not set to auto bake, complete execution now
        if not self.enable_auto_bake:
            self._handle_on_complete()

    def _handle_on_post_auto_bake(self, wrapper, bake_success):
        """ Called during post_bake. Sets self.bake_success and calls
        self.on_post_auto_bake().
        Args:
            bake_success (bool): True if the bake was successful.
        """
        if not self._check_wrapper(wrapper):
            return
        self._bake_success = bake_success
        self.on_post_auto_bake(bake_success)
        self._handle_on_complete()

    def on_failure(self):
        """ Called if the process fails to instantiate or fails to start
        a cook.
        Subclasses can override this function to implement custom functionality.
        """
        pass

    def on_complete(self):
        """ Called if the process completes instantiation, cook and/or baking,
        depending on enable_auto_cook and enable_auto_bake.
        Subclasses can override this function to implement custom functionality.
        """
        pass

    def on_pre_instantiation(self):
        """ Called during pre_instantiation.
        Subclasses can override this function to implement custom functionality.
        """
        pass

    def on_post_instantiation(self):
        """ Called during post_instantiation.
        Subclasses can override this function to implement custom functionality.
        """
        pass

    def on_post_auto_cook(self, cook_success):
        """ Called during post_cook.
        Subclasses can override this function to implement custom functionality.
        Args:
            cook_success (bool): True if the cook was successful.
        """
        pass

    def on_pre_process(self):
        """ Called during pre_process.
        Subclasses can override this function to implement custom functionality.
        """
        pass

    def on_post_processing(self):
        """ Called during post_processing.
        Subclasses can override this function to implement custom functionality.
        """
        pass

    def on_post_auto_bake(self, bake_success):
        """ Called during post_bake.
        Subclasses can override this function to implement custom functionality.
        Args:
            bake_success (bool): True if the bake was successful.
        """
        pass
|
from os import system
import unreal
import os
#get the libraries
# Get the editor libraries.
editor_util = unreal.EditorUtilityLibrary()
system_lib = unreal.SystemLibrary()
editor_asset = unreal.EditorAssetLibrary()

# Get the selected assets.
selected_assets = editor_util.get_selected_assets()
num_assets = len(selected_assets)
cleaned = 0  # count of successfully relocated assets (was misspelled "celaned")

# Hard-coded fallback parent path; only reached when nothing is selected, in
# which case the loop below never runs anyway.
# NOTE(review): "\\Game" is not a valid Unreal content path ("/Game" would
# be) — confirm before relying on this fallback.
parent_dir = "\\Game"
if num_assets > 0:
    # Derive the parent folder from the first selected asset's path.
    asset_path = editor_asset.get_path_name_for_loaded_asset(selected_assets[0])
    parent_dir = os.path.dirname(asset_path)
    for asset in selected_assets:
        # Get the class and clear-text name of the asset.
        asset_name = system_lib.get_object_name(asset)
        asset_class = asset.get_class()
        class_name = system_lib.get_class_display_name(asset_class)
        # Assemble the new path outside the try block so the except branch can
        # always reference it (previously it could raise NameError).
        # NOTE(review): os.path.join inserts "\\" on Windows, which Unreal
        # asset paths do not accept — verify on the target platform.
        new_path = os.path.join(parent_dir, class_name, asset_name)
        try:
            editor_asset.rename_loaded_asset(asset, new_path)
            cleaned += 1
            unreal.log("Cleaned up {} to {}".format(asset_name, new_path))
        except Exception as err:
            unreal.log("Could not move {} to new location {}".format(asset_name, new_path))
            # Log the message text rather than the raw exception object.
            unreal.log(str(err))
unreal.log("Cleaned up {} of {}".format(cleaned, num_assets))
|
#!/project/ python3
# -*- coding: utf-8 -*-
"""
Terminal Grounds - Metro Underground Direct Implementation
Unreal Engine 5.6 Level Creation Script
This script directly implements the Metro Underground level
using Unreal Engine Python API and UnrealMCP integration.
Execute this in Unreal Engine Editor Python console or via UnrealMCP
"""
import unreal
import json
import math
class TGMetroUndergroundImplementation:
    def __init__(self):
        # Asset path of the level this script creates and populates.
        self.level_name = "/project/"
        # Set by create_level(); holds the editor world for the new level.
        self.world = None
        # Every actor spawned by this script, kept for later bookkeeping.
        self.actors = []
def create_level(self):
"""Create new level in Unreal Engine"""
print("Creating Terminal Grounds Metro Underground Level...")
# Create new level
level_tool = unreal.EditorLevelUtils()
self.world = unreal.EditorLevelLibrary.new_level(self.level_name)
if not self.world:
print("Failed to create level!")
return False
print(f"Level created: {self.level_name}")
return True
def create_basic_geometry(self):
"""Create basic level geometry using BSP and static meshes"""
print("Creating basic level geometry...")
# Main tunnel geometry
main_tunnel = self.create_tunnel_segment(
start_pos=(-1800, 0, -200),
end_pos=(1800, 0, -200),
width=400,
height=300,
name="MainTransitTunnel"
)
# North service tunnel
north_tunnel = self.create_tunnel_segment(
start_pos=(-800, 800, -150),
end_pos=(800, 1200, -150),
width=200,
height=250,
name="NorthServiceTunnel"
)
# South maintenance level
south_tunnel = self.create_tunnel_segment(
start_pos=(-600, -1000, -300),
end_pos=(600, -600, -300),
width=300,
height=200,
name="SouthMaintenanceLevel"
)
# Central access shaft
self.create_cylindrical_shaft(
position=(0, 0, -100),
height=400,
diameter=150,
name="CentralAccessShaft"
)
print("Basic geometry created successfully!")
    def create_tunnel_segment(self, start_pos, end_pos, width, height, name):
        """Create a tunnel segment using BSP geometry.

        Args:
            start_pos, end_pos: (x, y, z) tuples for the tunnel endpoints.
            width, height: cross-section dimensions in Unreal units.
            name: suffix for the actor label ("Tunnel_<name>").

        Returns:
            The spawned Brush actor, or None if spawning failed.
        """
        # Calculate tunnel parameters: XY-plane length and segment midpoint.
        length = math.sqrt((end_pos[0] - start_pos[0])**2 + (end_pos[1] - start_pos[1])**2)
        center_pos = (
            (start_pos[0] + end_pos[0]) / 2,
            (start_pos[1] + end_pos[1]) / 2,
            (start_pos[2] + end_pos[2]) / 2
        )
        # Create BSP brush for tunnel.
        # NOTE(review): the builder is configured but never attached to the
        # spawned brush, so these dimensions currently have no effect —
        # confirm the intended brush-building API call.
        brush_builder = unreal.CubeBuilder()
        brush_builder.x = length
        brush_builder.y = width
        brush_builder.z = height
        # Create the brush actor at the midpoint.
        brush_actor = unreal.EditorLevelLibrary.spawn_actor_from_class(
            unreal.Brush,
            location=unreal.Vector(center_pos[0], center_pos[1], center_pos[2])
        )
        if brush_actor:
            brush_actor.set_actor_label(f"Tunnel_{name}")
            self.actors.append(brush_actor)
            print(f"Created tunnel: {name}")
        return brush_actor
def create_cylindrical_shaft(self, position, height, diameter, name):
"""Create cylindrical access shaft"""
# Use cylinder brush builder
brush_builder = unreal.CylinderBuilder()
brush_builder.z = height
brush_builder.outer_radius = diameter / 2
brush_builder.inner_radius = 0
# Create the brush actor
brush_actor = unreal.EditorLevelLibrary.spawn_actor_from_class(
unreal.Brush,
location=unreal.Vector(position[0], position[1], position[2])
)
if brush_actor:
brush_actor.set_actor_label(f"Shaft_{name}")
self.actors.append(brush_actor)
print(f"Created shaft: {name}")
return brush_actor
    def create_lighting_system(self):
        """Create the atmospheric lighting: one dim directional light for
        ambient surface bleed, warm emergency point lights spaced along the
        main tunnel, and a handful of work spot lights at key junctions."""
        print("Creating lighting system...")
        # Primary directional light (dim, cool-tinted ambient from above).
        directional_light = unreal.EditorLevelLibrary.spawn_actor_from_class(
            unreal.DirectionalLight,
            location=unreal.Vector(0, 0, 500),
            rotation=unreal.Rotator(-45, 0, 0)
        )
        if directional_light:
            directional_light.set_actor_label("AmbientSurfaceLight")
            # Configure light properties
            light_component = directional_light.get_component_by_class(unreal.DirectionalLightComponent)
            if light_component:
                light_component.set_intensity(0.3)
                light_component.set_light_color(unreal.LinearColor(0.8, 0.9, 1.0))
            self.actors.append(directional_light)
            print("Created directional light")
        # Emergency lighting system: paired lights every 400 units along the
        # main tunnel, on both sides (y = -150 and y = +150).
        emergency_light_positions = [
            (-1600, -150, 50), (-1600, 150, 50),
            (-1200, -150, 50), (-1200, 150, 50),
            (-800, -150, 50), (-800, 150, 50),
            (-400, -150, 50), (-400, 150, 50),
            (0, -150, 50), (0, 150, 50),
            (400, -150, 50), (400, 150, 50),
            (800, -150, 50), (800, 150, 50),
            (1200, -150, 50), (1200, 150, 50),
            (1600, -150, 50), (1600, 150, 50)
        ]
        for i, pos in enumerate(emergency_light_positions):
            point_light = unreal.EditorLevelLibrary.spawn_actor_from_class(
                unreal.PointLight,
                location=unreal.Vector(pos[0], pos[1], pos[2])
            )
            if point_light:
                point_light.set_actor_label(f"EmergencyLight_{i:02d}")
                light_component = point_light.get_component_by_class(unreal.PointLightComponent)
                if light_component:
                    # Warm orange-red tint for the emergency look.
                    light_component.set_intensity(800)
                    light_component.set_light_color(unreal.LinearColor(1.0, 0.4, 0.2))
                    light_component.set_attenuation_radius(300)
                self.actors.append(point_light)
        print(f"Created {len(emergency_light_positions)} emergency lights")
        # Work lights: (x, y, z, pitch, yaw) tuples, aimed at junctions.
        work_light_positions = [
            (-800, 0, -50, -60, 45),
            (0, 0, 200, -90, 0),
            (800, 0, -50, -60, -45),
            (0, 800, 50, -45, 180),
            (0, -800, -150, -30, 0)
        ]
        for i, (x, y, z, pitch, yaw) in enumerate(work_light_positions):
            spot_light = unreal.EditorLevelLibrary.spawn_actor_from_class(
                unreal.SpotLight,
                location=unreal.Vector(x, y, z),
                rotation=unreal.Rotator(pitch, yaw, 0)
            )
            if spot_light:
                spot_light.set_actor_label(f"WorkLight_{i:02d}")
                light_component = spot_light.get_component_by_class(unreal.SpotLightComponent)
                if light_component:
                    # Neutral-warm white, wide cone for task lighting.
                    light_component.set_intensity(1200)
                    light_component.set_light_color(unreal.LinearColor(0.9, 0.9, 0.8))
                    light_component.set_attenuation_radius(500)
                    light_component.set_outer_cone_angle(45)
                self.actors.append(spot_light)
        print(f"Created {len(work_light_positions)} work lights")
    def create_faction_territories(self):
        """Create faction territorial markers: one colored point light plus
        one flattened cube mesh ("territory pad") per faction zone."""
        print("Creating faction territorial markers...")
        # Faction territory definitions: label, position and marker color.
        faction_territories = [
            {"name": "Directorate", "pos": (-1200, -200, -100), "color": (0.0, 0.5, 1.0)},
            {"name": "Free77", "pos": (-400, 200, -100), "color": (0.8, 0.8, 0.0)},
            {"name": "Iron_Scavengers", "pos": (400, -200, -100), "color": (1.0, 0.3, 0.0)},
            {"name": "Nomad_Clans", "pos": (1200, 200, -100), "color": (0.5, 1.0, 0.3)},
            {"name": "Neutral", "pos": (0, 800, -50), "color": (0.7, 0.7, 0.7)},
            {"name": "Contested", "pos": (0, -800, -200), "color": (1.0, 0.0, 1.0)}
        ]
        for territory in faction_territories:
            # Create faction marker light in the faction's color.
            faction_light = unreal.EditorLevelLibrary.spawn_actor_from_class(
                unreal.PointLight,
                location=unreal.Vector(territory["pos"][0], territory["pos"][1], territory["pos"][2])
            )
            if faction_light:
                faction_light.set_actor_label(f"FactionMarker_{territory['name']}")
                light_component = faction_light.get_component_by_class(unreal.PointLightComponent)
                if light_component:
                    light_component.set_intensity(600)
                    light_component.set_light_color(unreal.LinearColor(
                        territory["color"][0],
                        territory["color"][1],
                        territory["color"][2]
                    ))
                    light_component.set_attenuation_radius(200)
                self.actors.append(faction_light)
            # Create faction territory marker (static mesh), 50 units below
            # the light so it reads as a floor pad.
            territory_marker = unreal.EditorLevelLibrary.spawn_actor_from_class(
                unreal.StaticMeshActor,
                location=unreal.Vector(
                    territory["pos"][0],
                    territory["pos"][1],
                    territory["pos"][2] - 50
                )
            )
            if territory_marker:
                territory_marker.set_actor_label(f"Territory_{territory['name']}")
                # Set basic cube mesh, squashed to a thin pad (z scale 0.1).
                mesh_component = territory_marker.get_component_by_class(unreal.StaticMeshComponent)
                if mesh_component:
                    cube_mesh = unreal.EditorAssetLibrary.load_asset("/project/")
                    if cube_mesh:
                        mesh_component.set_static_mesh(cube_mesh)
                        territory_marker.set_actor_scale3d(unreal.Vector(1.0, 1.0, 0.1))
                self.actors.append(territory_marker)
        print(f"Created {len(faction_territories)} faction territories")
    def create_cover_elements(self):
        """Create cover geometry: paired support columns every 200 units
        along the main tunnel and four abandoned metro cars on the track."""
        print("Creating cover elements...")
        # Support columns along the main tunnel, one on each side of the track.
        column_positions = []
        for x in range(-1600, 1800, 200):
            column_positions.extend([
                (x, -100, -200),
                (x, 100, -200)
            ])
        for i, pos in enumerate(column_positions):
            column = unreal.EditorLevelLibrary.spawn_actor_from_class(
                unreal.StaticMeshActor,
                location=unreal.Vector(pos[0], pos[1], pos[2])
            )
            if column:
                column.set_actor_label(f"SupportColumn_{i:02d}")
                mesh_component = column.get_component_by_class(unreal.StaticMeshComponent)
                if mesh_component:
                    cube_mesh = unreal.EditorAssetLibrary.load_asset("/project/")
                    if cube_mesh:
                        mesh_component.set_static_mesh(cube_mesh)
                        # Thin, tall scaling turns the cube into a pillar.
                        column.set_actor_scale3d(unreal.Vector(0.5, 0.5, 2.8))
                self.actors.append(column)
        print(f"Created {len(column_positions)} support columns")
        # Abandoned metro cars placed along the main tunnel centerline.
        metro_car_positions = [
            (-1000, 0, -200),
            (-200, 0, -200),
            (600, 0, -200),
            (1400, 0, -200)
        ]
        for i, pos in enumerate(metro_car_positions):
            metro_car = unreal.EditorLevelLibrary.spawn_actor_from_class(
                unreal.StaticMeshActor,
                location=unreal.Vector(pos[0], pos[1], pos[2])
            )
            if metro_car:
                metro_car.set_actor_label(f"MetroCar_{i:02d}")
                mesh_component = metro_car.get_component_by_class(unreal.StaticMeshComponent)
                if mesh_component:
                    cube_mesh = unreal.EditorAssetLibrary.load_asset("/project/")
                    if cube_mesh:
                        mesh_component.set_static_mesh(cube_mesh)
                        # Long, car-like proportions (3x stretch along y).
                        metro_car.set_actor_scale3d(unreal.Vector(1.2, 3.0, 1.8))
                self.actors.append(metro_car)
        print(f"Created {len(metro_car_positions)} metro cars")
def create_spawn_points(self):
"""Create player spawn points"""
print("Creating spawn points...")
# Team spawn points
team_alpha_spawns = [
(-1600, -400, -180, 45),
(-1600, 400, -180, -45),
(-1400, 0, -180, 0)
]
team_bravo_spawns = [
(1600, -400, -180, 135),
(1600, 400, -180, -135),
(1400, 0, -180, 180)
]
# Create Alpha team spawns
for i, (x, y, z, yaw) in enumerate(team_alpha_spawns):
spawn_point = unreal.EditorLevelLibrary.spawn_actor_from_class(
unreal.PlayerStart,
location=unreal.Vector(x, y, z),
rotation=unreal.Rotator(0, yaw, 0)
)
if spawn_point:
spawn_point.set_actor_label(f"TeamAlpha_Spawn_{i:02d}")
self.actors.append(spawn_point)
# Create Bravo team spawns
for i, (x, y, z, yaw) in enumerate(team_bravo_spawns):
spawn_point = unreal.EditorLevelLibrary.spawn_actor_from_class(
unreal.PlayerStart,
location=unreal.Vector(x, y, z),
rotation=unreal.Rotator(0, yaw, 0)
)
if spawn_point:
spawn_point.set_actor_label(f"TeamBravo_Spawn_{i:02d}")
self.actors.append(spawn_point)
# Solo spawn points
solo_spawns = [
(-800, 600, -130, -90),
(800, -600, -130, 90),
(0, 0, 150, 0),
(-200, 800, -130, -135),
(200, -800, -280, 45)
]
for i, (x, y, z, yaw) in enumerate(solo_spawns):
spawn_point = unreal.EditorLevelLibrary.spawn_actor_from_class(
unreal.PlayerStart,
location=unreal.Vector(x, y, z),
rotation=unreal.Rotator(0, yaw, 0)
)
if spawn_point:
spawn_point.set_actor_label(f"Solo_Spawn_{i:02d}")
self.actors.append(spawn_point)
total_spawns = len(team_alpha_spawns) + len(team_bravo_spawns) + len(solo_spawns)
print(f"Created {total_spawns} spawn points")
def create_extraction_zones(self):
"""Create extraction zones"""
print("Creating extraction zones...")
extraction_zones = [
{"name": "Primary_Central", "pos": (0, 0, 250), "radius": 100},
{"name": "Secondary_North", "pos": (800, 800, 100), "radius": 80},
{"name": "Emergency_South", "pos": (-400, -800, -250), "radius": 60}
]
for zone in extraction_zones:
# Create extraction beacon
beacon = unreal.EditorLevelLibrary.spawn_actor_from_class(
unreal.StaticMeshActor,
location=unreal.Vector(zone["pos"][0], zone["pos"][1], zone["pos"][2])
)
if beacon:
beacon.set_actor_label(f"ExtractionBeacon_{zone['name']}")
mesh_component = beacon.get_component_by_class(unreal.StaticMeshComponent)
if mesh_component:
cylinder_mesh = unreal.EditorAssetLibrary.load_asset("/project/")
if cylinder_mesh:
mesh_component.set_static_mesh(cylinder_mesh)
beacon.set_actor_scale3d(unreal.Vector(
zone["radius"]/100,
zone["radius"]/100,
0.1
))
self.actors.append(beacon)
# Create extraction light
extraction_light = unreal.EditorLevelLibrary.spawn_actor_from_class(
unreal.PointLight,
location=unreal.Vector(zone["pos"][0], zone["pos"][1], zone["pos"][2] + 50)
)
if extraction_light:
extraction_light.set_actor_label(f"ExtractionLight_{zone['name']}")
light_component = extraction_light.get_component_by_class(unreal.PointLightComponent)
if light_component:
light_component.set_intensity(1000)
light_component.set_light_color(unreal.LinearColor(0.0, 1.0, 0.0))
light_component.set_attenuation_radius(zone["radius"] * 2)
self.actors.append(extraction_light)
print(f"Created {len(extraction_zones)} extraction zones")
def setup_navigation_mesh(self):
"""Setup navigation mesh for AI and pathfinding"""
print("Setting up navigation mesh...")
# Create nav mesh bounds volume
nav_bounds = unreal.EditorLevelLibrary.spawn_actor_from_class(
unreal.NavMeshBoundsVolume,
location=unreal.Vector(0, 0, 0)
)
if nav_bounds:
nav_bounds.set_actor_label("MainNavigationBounds")
# Scale to cover entire level
nav_bounds.set_actor_scale3d(unreal.Vector(40, 40, 10))
self.actors.append(nav_bounds)
print("Created navigation bounds volume")
# Build navigation mesh
nav_system = unreal.NavigationSystemV1.get_navigation_system(self.world)
if nav_system:
nav_system.build()
print("Navigation mesh build initiated")
def finalize_level(self):
"""Finalize level creation and save"""
print("Finalizing level...")
# Save level
if unreal.EditorLevelLibrary.save_current_level():
print(f"Level saved successfully: {self.level_name}")
else:
print("Failed to save level!")
# Generate summary
print("\n" + "="*60)
print("TERMINAL GROUNDS METRO UNDERGROUND LEVEL COMPLETE")
print("="*60)
print(f"Level Name: {self.level_name}")
print(f"Total Actors Created: {len(self.actors)}")
print("\nLevel Features:")
print("- Multi-level vertical design")
print("- Faction territorial control system")
print("- Multiple extraction zones")
print("- Comprehensive lighting system")
print("- Tactical cover positioning")
print("- Balanced spawn point system")
print("- Navigation mesh support")
print("\nLevel is ready for gameplay testing!")
return True
def build_complete_level(self):
"""Build the complete Metro Underground level"""
print("Starting Terminal Grounds Metro Underground level creation...")
if not self.create_level():
return False
# Create all level elements
self.create_basic_geometry()
self.create_lighting_system()
self.create_faction_territories()
self.create_cover_elements()
self.create_spawn_points()
self.create_extraction_zones()
self.setup_navigation_mesh()
# Finalize
return self.finalize_level()
# Execute level creation
def create_terminal_grounds_level():
    """Main execution function: build the complete Metro Underground level."""
    return TGMetroUndergroundImplementation().build_complete_level()
# Run if executed directly
if __name__ == "__main__":
    # Entry point when run as a standalone script (e.g. from the UE Python console).
    create_terminal_grounds_level()
|
import unreal
def get_selected_assets():
    """Get all currently selected assets in the content browser."""
    return unreal.EditorUtilityLibrary().get_selected_assets()
def duplicate_asset(asset_path, new_name):
    """Duplicate the asset at *asset_path* as *new_name* in the same folder.

    Returns the duplicated asset, or None if the original could not be loaded.
    """
    asset_tools = unreal.AssetToolsHelpers.get_asset_tools()
    original_asset = unreal.EditorAssetLibrary.load_asset(asset_path)
    if original_asset:
        # BUG FIX: AssetTools.duplicate_asset expects (asset_name, package_path,
        # original_object); previously the *full object path* was passed as the
        # package path. Derive the containing package directory instead.
        package_path = original_asset.get_path_name().rsplit('/', 1)[0]
        return asset_tools.duplicate_asset(new_name, package_path, original_asset)
    return None
def get_asset_dependencies(asset_path):
    """Return the packages that reference the asset at *asset_path*.

    NOTE(review): despite the function name, find_package_referencers_for_asset
    returns *referencers* (assets that depend on this one), not this asset's own
    dependencies -- confirm intent before relying on the name.
    """
    return unreal.EditorAssetLibrary.find_package_referencers_for_asset(asset_path)
def bulk_rename_assets(search_pattern, replace_pattern):
    """Rename the selected assets, replacing *search_pattern* with *replace_pattern*.

    Only the asset name itself is rewritten; the containing folder path is
    left untouched.
    """
    selected_assets = get_selected_assets()
    for asset in selected_assets:
        old_name = asset.get_name()
        new_name = old_name.replace(search_pattern, replace_pattern)
        if new_name != old_name:
            asset_path = asset.get_path_name()
            # BUG FIX: replacing old_name inside the whole path could also
            # rewrite directory components that happen to contain it. Build
            # the destination from the package directory + new name instead.
            package_path = asset_path.rsplit('/', 1)[0]
            unreal.EditorAssetLibrary.rename_asset(asset_path, f"{package_path}/{new_name}")
|
# unreal.AssetToolsHelpers
# https://api.unrealengine.com/project/.html
# unreal.AssetTools
# https://api.unrealengine.com/project/.html
# unreal.EditorAssetLibrary
# https://api.unrealengine.com/project/.html
# All operations can be slow. The editor should not be in play in editor mode. It will not work on assets of the type level.
# Possible Directory Paths:
# '/project/'
# '/project/'
# Possible Asset Paths:
# '/project/.MyAsset'
# '/project/'
# unreal.AssetRenameData
# https://api.unrealengine.com/project/.html
# unreal.Package
# https://api.unrealengine.com/project/.html
# unreal.EditorLoadingAndSavingUtils
# https://api.unrealengine.com/project/.html
# unreal.AssetImportTask
# https://api.unrealengine.com/project/.html
# unreal.AssetTools
# https://api.unrealengine.com/project/.html
# unreal.FbxImportUI
# https://api.unrealengine.com/project/.html
# unreal.FbxMeshImportData
# https://api.unrealengine.com/project/.html
# unreal.FbxStaticMeshImportData
# https://api.unrealengine.com/project/.html
# unreal.FbxSkeletalMeshImportData
# https://api.unrealengine.com/project/.html
# unreal.FbxAssetImportData
# https://api.unrealengine.com/project/.html
# unreal.FbxAnimSequenceImportData
# https://api.unrealengine.com/project/.html
# unreal.FBXAnimationLengthImportType
# https://api.unrealengine.com/project/.html
# unreal.LinearColor
# https://api.unrealengine.com/project/.html
# unreal.Factory
# https://api.unrealengine.com/project/.html
import unreal
def createGenericAsset(asset_path='', unique_name=True, asset_class=None, asset_factory=None):
    """Create an asset, or load it when it already exists.

    asset_path: str : Path of asset to create
    unique_name: bool : If True, will add a number at the end of the asset name until unique
    asset_class: obj unreal.Class : The asset class
    asset_factory: obj unreal.Factory : The associated factory of the class
    return: obj : The created (or already-existing) asset
    """
    asset_tools = unreal.AssetToolsHelpers.get_asset_tools()
    if unique_name:
        asset_path, _ = asset_tools.create_unique_asset_name(base_package_name=asset_path, suffix='')
    if unreal.EditorAssetLibrary.does_asset_exist(asset_path=asset_path):
        return unreal.load_asset(asset_path)
    package_path, asset_name = asset_path.rsplit('/', 1)
    return asset_tools.create_asset(asset_name=asset_name, package_path=package_path, asset_class=asset_class, factory=asset_factory)
def showAssetsInContentBrowser(paths=[]):
    """Focus the content browser on the given asset paths.

    paths: List of str : Asset paths
    """
    unreal.EditorAssetLibrary.sync_browser_to_objects(paths)
def openAssets(paths=[]):
    """Open an asset editor for every asset path in *paths*.

    paths: List of str : Asset paths
    """
    packages = [getPackageFromPath(p) for p in paths]
    unreal.AssetToolsHelpers.get_asset_tools().open_editor_for_assets(assets=packages)
def createDirectory(path=''):
    """Create a content directory.

    path: str : Directory path
    return: bool : True if the operation succeeds
    """
    return unreal.EditorAssetLibrary.make_directory(path)
def duplicateDirectory(from_dir='', to_dir=''):
    """Duplicate a content directory.

    from_dir: str : Directory path to duplicate
    to_dir: str : Duplicated directory path
    return: bool : True if the operation succeeds
    """
    return unreal.EditorAssetLibrary.duplicate_directory(from_dir, to_dir)
def deleteDirectory(path=''):
    """Delete a content directory.

    path: str : Directory path
    return: bool : True if the operation succeeds
    """
    return unreal.EditorAssetLibrary.delete_directory(path)
def directoryExist(path=''):
    """Check whether a content directory exists.

    path: str : Directory path
    return: bool : True if the directory exists
    """
    return unreal.EditorAssetLibrary.does_directory_exist(path)
def renameDirectory(from_dir='', to_dir=''):
    """Rename a content directory.

    from_dir: str : Directory path to rename
    to_dir: str : Renamed directory path
    return: bool : True if the operation succeeds
    """
    return unreal.EditorAssetLibrary.rename_directory(from_dir, to_dir)
def duplicateAsset(from_path='', to_path=''):
    """Duplicate an asset.

    from_path: str : Asset path to duplicate
    to_path: str : Duplicated asset path
    return: bool : True if the operation succeeds
    """
    return unreal.EditorAssetLibrary.duplicate_asset(from_path, to_path)
def deleteAsset(path=''):
    """Delete an asset.

    path: str : Asset path
    return: bool : True if the operation succeeds
    """
    return unreal.EditorAssetLibrary.delete_asset(path)
def assetExist(path=''):
    """Check whether an asset exists.

    path: str : Asset path
    return: bool : True if the asset exists
    """
    return unreal.EditorAssetLibrary.does_asset_exist(path)
def renameAsset(from_path='', to_path=''):
    """Rename an asset.

    from_path: str : Asset path to rename
    to_path: str : Renamed asset path
    return: bool : True if the operation succeeds
    """
    return unreal.EditorAssetLibrary.rename_asset(from_path, to_path)
def duplicateAssetDialog(from_path='', to_path='', show_dialog=True):
    """Duplicate an asset, optionally via the editor's confirm dialog.

    Note: also works on assets of the type level (but might be really slow
    if the level is huge).

    from_path: str : Asset path to duplicate
    to_path: str : Duplicated asset path
    show_dialog: bool : True if you want to show the confirm pop-up
    return: bool : True if the operation succeeds
    """
    package_path, asset_name = to_path.rsplit('/', 1)
    asset_tools = unreal.AssetToolsHelpers.get_asset_tools()
    if show_dialog:
        return asset_tools.duplicate_asset_with_dialog(asset_name=asset_name, package_path=package_path, original_object=getPackageFromPath(from_path))
    # BUG FIX: the non-dialog branch called unreal.duplicate_asset.get_asset_tools(),
    # which raises AttributeError -- the AssetTools singleton comes from
    # unreal.AssetToolsHelpers.get_asset_tools().
    return asset_tools.duplicate_asset(asset_name=asset_name, package_path=package_path, original_object=getPackageFromPath(from_path))
def renameAssetDialog(from_path='', to_path='', show_dialog=True):
    """Rename an asset, optionally via the editor's confirm dialog.

    Note: also works on assets of the type level (but might be really slow
    if the level is huge).

    from_path: str : Asset path to rename
    to_path: str : Renamed asset path
    show_dialog: bool : True if you want to show the confirm pop-up
    return: bool : True if the operation succeeds
    """
    package_path, asset_name = to_path.rsplit('/', 1)
    rename_data = unreal.AssetRenameData(asset=getPackageFromPath(from_path), new_package_path=package_path, new_name=asset_name)
    asset_tools = unreal.AssetToolsHelpers.get_asset_tools()
    if show_dialog:
        return asset_tools.rename_assets_with_dialog(assets_and_names=[rename_data])
    return asset_tools.rename_assets(assets_and_names=[rename_data])
def saveAsset(path='', force_save=True):
    """Save an asset.

    path: str : Asset path
    force_save: bool : Save even if the asset is not dirty
    return: bool : True if the operation succeeds
    """
    only_dirty = not force_save
    return unreal.EditorAssetLibrary.save_asset(path, only_if_is_dirty=only_dirty)
def saveDirectory(path='', force_save=True, recursive=True):
    """Save every asset under a directory.

    path: str : Directory path
    force_save: bool : Save even assets that are not dirty
    recursive: bool : Recurse into subdirectories
    return: bool : True if the operation succeeds
    """
    only_dirty = not force_save
    return unreal.EditorAssetLibrary.save_directory(path, only_if_is_dirty=only_dirty, recursive=recursive)
def getPackageFromPath(path):
    """Load the package for the given asset path.

    path: str : Asset path
    return: obj : The loaded package
    """
    return unreal.load_package(path)
def getAllDirtyPackages():
    """return: obj List : All content and map packages that need to be saved."""
    dirty = list(unreal.EditorLoadingAndSavingUtils.get_dirty_content_packages())
    dirty.extend(unreal.EditorLoadingAndSavingUtils.get_dirty_map_packages())
    return dirty
def saveAllDirtyPackages(show_dialog=False):
    """Save every dirty map and content package.

    show_dialog: bool : True if you want to see the confirm pop-up
    return: bool : True if the operation succeeds
    """
    saver = (unreal.EditorLoadingAndSavingUtils.save_dirty_packages_with_dialog
             if show_dialog
             else unreal.EditorLoadingAndSavingUtils.save_dirty_packages)
    return saver(save_map_packages=True, save_content_packages=True)
def savePackages(packages=[], show_dialog=False):
    """Save the given packages.

    show_dialog: bool : True if you want to see the confirm pop-up
    return: bool : True if the operation succeeds

    only_dirty=False because the flag looks like it's not working properly
    at the moment.
    """
    saver = (unreal.EditorLoadingAndSavingUtils.save_packages_with_dialog
             if show_dialog
             else unreal.EditorLoadingAndSavingUtils.save_packages)
    return saver(packages_to_save=packages, only_dirty=False)
def buildImportTask(filename='', destination_path='', options=None):
    """Build an automated AssetImportTask.

    filename: str : Windows file fullname of the asset you want to import
    destination_path: str : Asset path
    options: obj : Import option object. Can be None for assets that do not
        usually have a pop-up when importing (e.g. Sound, Texture, etc.)
    return: obj : The import task object
    """
    task = unreal.AssetImportTask()
    # Run silently, overwrite existing assets, and save the result.
    for prop, value in (
        ('automated', True),
        ('destination_name', ''),
        ('destination_path', destination_path),
        ('filename', filename),
        ('replace_existing', True),
        ('save', True),
        ('options', options),
    ):
        task.set_editor_property(prop, value)
    return task
def executeImportTasks(tasks=[]):
    """Run the given import tasks and collect the imported asset paths.

    tasks: obj List : Import task objects (see buildImportTask())
    return: str List : The paths of successfully imported assets
    """
    unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks(tasks)
    return [
        path
        for task in tasks
        for path in task.get_editor_property('imported_object_paths')
    ]
def buildStaticMeshImportOptions():
    """return: obj : FbxImportUI configured with basic static-mesh import options."""
    options = unreal.FbxImportUI()
    # unreal.FbxImportUI
    for prop, value in (
        ('import_mesh', True),
        ('import_textures', False),
        ('import_materials', False),
        ('import_as_skeletal', False),  # Static Mesh
    ):
        options.set_editor_property(prop, value)
    mesh_data = options.static_mesh_import_data
    # unreal.FbxMeshImportData
    mesh_data.set_editor_property('import_translation', unreal.Vector(0.0, 0.0, 0.0))
    mesh_data.set_editor_property('import_rotation', unreal.Rotator(0.0, 0.0, 0.0))
    mesh_data.set_editor_property('import_uniform_scale', 1.0)
    # unreal.FbxStaticMeshImportData
    mesh_data.set_editor_property('combine_meshes', True)
    mesh_data.set_editor_property('generate_lightmap_u_vs', True)
    mesh_data.set_editor_property('auto_generate_collision', True)
    return options
def buildSkeletalMeshImportOptions():
    """return: obj : FbxImportUI configured with basic skeletal-mesh import options."""
    options = unreal.FbxImportUI()
    # unreal.FbxImportUI
    for prop, value in (
        ('import_mesh', True),
        ('import_textures', True),
        ('import_materials', True),
        ('import_as_skeletal', True),  # Skeletal Mesh
    ):
        options.set_editor_property(prop, value)
    mesh_data = options.skeletal_mesh_import_data
    # unreal.FbxMeshImportData
    mesh_data.set_editor_property('import_translation', unreal.Vector(0.0, 0.0, 0.0))
    mesh_data.set_editor_property('import_rotation', unreal.Rotator(0.0, 0.0, 0.0))
    mesh_data.set_editor_property('import_uniform_scale', 1.0)
    # unreal.FbxSkeletalMeshImportData
    mesh_data.set_editor_property('import_morph_targets', True)
    mesh_data.set_editor_property('update_skeleton_reference_pose', False)
    return options
def buildAnimationImportOptions(skeleton_path=''):
    """return: obj : FbxImportUI configured with basic animation import options.

    skeleton_path: str : Skeleton asset path of the skeleton that will be
        used to bind the animation
    """
    options = unreal.FbxImportUI()
    # unreal.FbxImportUI
    options.set_editor_property('import_animations', True)
    options.skeleton = unreal.load_asset(skeleton_path)
    anim_data = options.anim_sequence_import_data
    # unreal.FbxMeshImportData
    anim_data.set_editor_property('import_translation', unreal.Vector(0.0, 0.0, 0.0))
    anim_data.set_editor_property('import_rotation', unreal.Rotator(0.0, 0.0, 0.0))
    anim_data.set_editor_property('import_uniform_scale', 1.0)
    # unreal.FbxAnimSequenceImportData
    anim_data.set_editor_property('animation_length', unreal.FBXAnimationLengthImportType.FBXALIT_EXPORTED_TIME)
    anim_data.set_editor_property('remove_redundant_keys', False)
    return options
# Cpp ########################################################################################################################################################################################
def getSelectedAssets():
    """return: str List : The asset paths that are currently selected."""
    return unreal.CppLib.get_selected_assets()
def setSelectedAssets(asset_paths=[]):
    """Select the given asset paths in the content browser.

    asset_paths: str List : The asset paths to select
    """
    unreal.CppLib.set_selected_assets(asset_paths)
def getSelectedFolders():
    """return: str List : The folder paths that are currently selected."""
    return unreal.CppLib.get_selected_folders()
def setSelectedFolders(folder_paths=[]):
    """Select the given folder paths in the content browser.

    folder_paths: str List : The folder paths to select
    """
    unreal.CppLib.set_selected_folders(folder_paths)
def getAllOpenedAssets():
    """return: obj List : The asset objects currently opened in the editor."""
    return unreal.CppLib.get_assets_opened_in_editor()
def closeAssets(asset_objects=[]):
    """Close the editors for the given asset objects.

    asset_objects: obj List : The asset objects to close
    """
    unreal.CppLib.close_editor_for_assets(asset_objects)
def setDirectoryColor(path='', color=None):
    """Apply a color to a content-browser folder.

    Note: if the directory already exists, a restart of Unreal may be needed;
    it isn't needed when the color is applied before creating the folder.

    path: str : Directory path
    color: obj unreal.LinearColor : The color to apply
    """
    unreal.CppLib.set_folder_color(path, color)
|
# For each selected level actor: if it is a StaticMeshActor whose mesh has
# exactly one material element, force the material-instance parameter off.
import unreal

selected_actors = unreal.EditorLevelLibrary.get_selected_level_actors()
for actor in selected_actors:
    # Only StaticMeshActors are processed.
    if isinstance(actor, unreal.StaticMeshActor):
        sm_component = actor.static_mesh_component
        materials = sm_component.get_materials()
        if len(materials) == 1:
            material_instance = sm_component.get_material(0)
            # Parameter name and target value.
            parameter_name = "EnableUDW"
            control_bool_val = False
            # NOTE(review): set_material_instance_static_switch_parameter_value
            # expects a MaterialInstanceConstant; confirm get_material(0)
            # actually returns one here.
            unreal.MaterialEditingLibrary.set_material_instance_static_switch_parameter_value(
                material_instance, parameter_name, control_bool_val)
            unreal.MaterialEditingLibrary.update_material_instance(material_instance)
            print(actor.get_name(), ">", material_instance.get_name(), ">>", parameter_name, ">>>", control_bool_val)
        else:
            print(">>Material is more than 1 <<")
    else:
        print("Not SM")
|
# Copyright (c) <2021> Side Effects Software Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The name of Side Effects Software may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY SIDE EFFECTS SOFTWARE "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL SIDE EFFECTS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unreal
""" Example for getting the API instance and starting/creating the Houdini
Engine Session.
"""
def run():
    """Ensure a valid Houdini Engine session exists, creating one if needed."""
    houdini_api = unreal.HoudiniPublicAPIBlueprintLib.get_api()
    # Only create a session when none is currently valid.
    if not houdini_api.is_session_valid():
        houdini_api.create_session()


if __name__ == '__main__':
    run()
|
'''
File:
Author:
Date:
Description:
Other Notes:
'''
import unreal
# noinspection PyUnresolvedReferences
class MyClass(object):
    """Slate post-tick callback helper.

    Prints the frame index every tick, acts once every `frames_buffer`
    frames, and unregisters itself after `max_count` ticks.
    """

    def __init__(self, frames_buffer: int) -> None:
        # Ticks observed since start().
        self.frame_count = 0
        # Total ticks before the callback unregisters itself.
        self.max_count = 1000
        # Act once every this many frames.
        self.frame_buffer = frames_buffer

    def start(self) -> None:
        """Register the tick callback and reset the frame counter."""
        self.slate_post_tick_handle = unreal.register_slate_post_tick_callback(self.tick)
        self.frame_count = 0

    def tick(self, delta_time: float) -> None:
        """Per-frame callback: count frames, periodically act, stop at max_count."""
        print(self.frame_count)
        self.frame_count += 1
        count = self.frame_count
        if count >= self.max_count:
            unreal.unregister_slate_post_tick_callback(self.slate_post_tick_handle)
        elif count % self.frame_buffer == 0:
            print("Do things for one callback")
# Act once every 10 frames.
frames_bw_ticks = 10
test = MyClass(frames_bw_ticks)
test.start()
|
# Copyright (c) <2021> Side Effects Software Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The name of Side Effects Software may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY SIDE EFFECTS SOFTWARE "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL SIDE EFFECTS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unreal
""" Example script for instantiating an asset, setting parameters, cooking it
and iterating over and logging all of its output objects.
"""
_g_wrapper = None
def get_test_hda_path():
    """Object path of the test HDA asset."""
    return '/project/.pig_head_subdivider_v01'
def get_test_hda():
    """Load and return the test HDA asset."""
    asset_path = get_test_hda_path()
    return unreal.load_object(None, asset_path)
def on_post_instantiation(in_wrapper):
    """Pre-first-cook callback: set parameters, dump them all, then recook."""
    print('on_post_instantiation')
    # Set parameter values for the next cook.
    in_wrapper.set_parameter_tuples({
        'add_instances': unreal.HoudiniParameterTuple(bool_values=(True, )),
        'num_instances': unreal.HoudiniParameterTuple(int32_values=(8, )),
    })
    # Log every parameter tuple on the wrapped asset.
    param_tuples = in_wrapper.get_parameter_tuples()
    print('parameter tuples: {}'.format(len(param_tuples) if param_tuples else 0))
    if param_tuples:
        for tuple_name, tuple_values in param_tuples.items():
            print('parameter tuple name: {}'.format(tuple_name))
            print('\tbool_values: {}'.format(tuple_values.bool_values))
            print('\tfloat_values: {}'.format(tuple_values.float_values))
            print('\tint32_values: {}'.format(tuple_values.int32_values))
            print('\tstring_values: {}'.format(tuple_values.string_values))
    # Force a cook/recook.
    in_wrapper.recook()
def on_post_process(in_wrapper):
    """Post-cook callback: log every output object generated by the HDA."""
    print('on_post_process')
    num_outputs = in_wrapper.get_num_outputs()
    print('num_outputs: {}'.format(num_outputs))
    if num_outputs <= 0:
        return
    for output_idx in range(num_outputs):
        identifiers = in_wrapper.get_output_identifiers_at(output_idx)
        print('\toutput index: {}'.format(output_idx))
        print('\toutput type: {}'.format(in_wrapper.get_output_type_at(output_idx)))
        print('\tnum_output_objects: {}'.format(len(identifiers)))
        if not identifiers:
            continue
        for identifier in identifiers:
            out_obj = in_wrapper.get_output_object_at(output_idx, identifier)
            out_comp = in_wrapper.get_output_component_at(output_idx, identifier)
            proxy = in_wrapper.is_output_current_proxy_at(output_idx, identifier)
            print('\t\tidentifier: {}'.format(identifier))
            print('\t\toutput_object: {}'.format(out_obj.get_name() if out_obj else 'None'))
            print('\t\toutput_component: {}'.format(out_comp.get_name() if out_comp else 'None'))
            print('\t\tis_proxy: {}'.format(proxy))
            print('')
def run():
    """Instantiate the test HDA (auto-cook disabled) and bind cook callbacks."""
    api = unreal.HoudiniPublicAPIBlueprintLib.get_api()
    global _g_wrapper
    # Keep a module-level reference so the wrapper isn't garbage collected.
    # Auto-cook is disabled, so recook() must be called explicitly.
    _g_wrapper = api.instantiate_asset(get_test_hda(), unreal.Transform(), enable_auto_cook=False)
    # Fires before the first cook.
    _g_wrapper.on_post_instantiation_delegate.add_callable(on_post_instantiation)
    # Fires after a cook, once all outputs exist in Unreal.
    _g_wrapper.on_post_processing_delegate.add_callable(on_post_process)


if __name__ == '__main__':
    run()
|
# Copyright (c) <2021> Side Effects Software Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The name of Side Effects Software may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY SIDE EFFECTS SOFTWARE "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL SIDE EFFECTS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unreal
""" Example script for instantiating an asset, cooking it and baking an
individual output object.
"""
_g_wrapper = None
def get_test_hda_path():
    """Object path of the test HDA asset."""
    return '/project/.pig_head_subdivider_v01'
def get_test_hda():
    """Load and return the test HDA asset."""
    asset_path = get_test_hda_path()
    return unreal.load_object(None, asset_path)
def on_post_process(in_wrapper):
    """Post-cook callback: log all outputs, bake one static mesh, then clean up.

    Bakes a mesh output to the content browser and deletes the instantiated
    asset, clearing the module-level wrapper reference.
    """
    print('on_post_process')
    # Print details about the outputs and record the first static mesh we find
    sm_index = None
    sm_identifier = None
    # in_wrapper.on_post_processing_delegate.remove_callable(on_post_process)
    num_outputs = in_wrapper.get_num_outputs()
    print('num_outputs: {}'.format(num_outputs))
    if num_outputs > 0:
        for output_idx in range(num_outputs):
            identifiers = in_wrapper.get_output_identifiers_at(output_idx)
            output_type = in_wrapper.get_output_type_at(output_idx)
            print('\toutput index: {}'.format(output_idx))
            print('\toutput type: {}'.format(output_type))
            print('\tnum_output_objects: {}'.format(len(identifiers)))
            if identifiers:
                for identifier in identifiers:
                    output_object = in_wrapper.get_output_object_at(output_idx, identifier)
                    output_component = in_wrapper.get_output_component_at(output_idx, identifier)
                    is_proxy = in_wrapper.is_output_current_proxy_at(output_idx, identifier)
                    print('\t\tidentifier: {}'.format(identifier))
                    print('\t\toutput_object: {}'.format(output_object.get_name() if output_object else 'None'))
                    print('\t\toutput_component: {}'.format(output_component.get_name() if output_component else 'None'))
                    print('\t\tis_proxy: {}'.format(is_proxy))
                    print('')
                    # NOTE(review): this overwrites sm_index/sm_identifier on
                    # every matching mesh, so it records the *last* match,
                    # not the first as the comment above suggests.
                    if (output_type == unreal.HoudiniOutputType.MESH and
                            isinstance(output_object, unreal.StaticMesh)):
                        sm_index = output_idx
                        sm_identifier = identifier
    # Bake the first static mesh we found to the CB
    if sm_index is not None and sm_identifier is not None:
        print('baking {}'.format(sm_identifier))
        success = in_wrapper.bake_output_object_at(sm_index, sm_identifier)
        print('success' if success else 'failed')
    # Delete the instantiated asset
    in_wrapper.delete_instantiated_asset()
    # Drop the module-level reference so the wrapper can be collected.
    global _g_wrapper
    _g_wrapper = None
def run():
    """Instantiate the test HDA with auto-cook enabled and bind the post-process hook."""
    api = unreal.HoudiniPublicAPIBlueprintLib.get_api()
    global _g_wrapper
    # Keep a module-level reference so the wrapper isn't garbage collected.
    _g_wrapper = api.instantiate_asset(get_test_hda(), unreal.Transform())
    # Fires after a cook, once all outputs exist in Unreal.
    _g_wrapper.on_post_processing_delegate.add_callable(on_post_process)


if __name__ == '__main__':
    run()
|
import unreal
from CommonFunctions import *
# from importlib import reload
# reload Blueprint
# Collision profile names referenced when configuring components.
BASE_COLLISION: unreal.Name = unreal.Name("BlockAll")
DECAL_COLLISION: unreal.Name = unreal.Name("NoCollision")
# Library / subsystem singletons cached at import time.
sys_lib = unreal.SystemLibrary()
string_lib = unreal.StringLibrary()
bp_editor_lib = unreal.BlueprintEditorLibrary
staticmesh_subsys = unreal.get_editor_subsystem(unreal.StaticMeshEditorSubsystem)
subobj_subsys = unreal.get_engine_subsystem(unreal.SubobjectDataSubsystem)
# Content-browser selection is captured once, when this module is imported.
selectedAssets = unreal.EditorUtilityLibrary.get_selected_assets()
sk_component_class = unreal.SkeletalMeshComponent.static_class()
sm_component_class = unreal.StaticMeshComponent.static_class()
bp_class=unreal.BlueprintGeneratedClass.static_class()
def get_blueprint_assets(assets):
    """Return only the assets whose class display name is "Blueprint".

    :param assets: iterable of loaded asset objects
    :return: list of Blueprint assets (possibly empty)

    Fix: the original assigned ``asset.get_class()`` to a local and then
    immediately overwrote it with the display name — the dead first
    assignment has been removed.
    """
    blueprints = []
    for asset in assets:
        class_display_name = sys_lib.get_class_display_name(asset.get_class())
        if class_display_name == "Blueprint":
            blueprints.append(asset)
    return blueprints
def get_blueprint_components(blueprint):
    """Collect the unique sub-object (component) instances of *blueprint*.

    :param blueprint: a Blueprint asset
    :return: de-duplicated list of component objects
    """
    components = []
    handles = subobj_subsys.k2_gather_subobject_data_for_blueprint(blueprint)
    for handle in handles:
        data = subobj_subsys.k2_find_subobject_data_from_handle(handle)
        comp = unreal.SubobjectDataBlueprintFunctionLibrary.get_object(data)
        # keep only the first occurrence of each component object
        if comp not in components:
            components.append(comp)
    return components
def set_components_static(components):
    """Force mobility = STATIC on every component that is not itself a
    blueprint-generated (nested BP) class."""
    class_bp="BlueprintGeneratedClass"
    for comp in components:
        # nested blueprint components are skipped; only plain components are retagged
        if class_bp in str(comp.get_class()):
            continue
        comp.set_editor_property(
            name="mobility", value=unreal.ComponentMobility.STATIC
        )
def set_bp_static(assets):
    """Called from inside UE: set mobility to STATIC on the components of every
    Blueprint contained in *assets* (e.g. BPs inside a Prefab).

    Shows a cancellable progress dialog while processing; logs an error when the
    selection holds no Blueprint at all.
    """
    blueprints = get_blueprint_assets(assets)
    assetCount = len(blueprints)
    taskName = "Batch Processing BP Assets， "
    # progress dialog for the (potentially slow) batch
    with unreal.ScopedSlowTask(assetCount, taskName) as slowTask:
        slowTask.make_dialog(True)
        for currentStep, blueprint in enumerate(blueprints, start=1):
            if slowTask.should_cancel():
                break
            # advance the progress bar with a "n/total" caption
            slowTask.enter_progress_frame(
                1, taskName + str(currentStep) + "/" + str(assetCount)
            )
            set_components_static(get_blueprint_components(blueprint))
    if assetCount == 0:
        unreal.log_error("selection no Blueprint, aborted. | 所选模型没有Blueprint")
    else:
        unreal.log(
            "{} BPs with its child assets done | 蓝图及对应资产属性设置完成".format(
                assetCount
            )
        )
## =================================================== ##
# testrun
# set_bp_static(selectedAssets)
|
import unreal
import os
unreal.log_warning("running custom_unreal_prescript")
# UEMAP_PATH is expected to carry a /Game/... level path, set by the launching process
map_path = os.environ.get("UEMAP_PATH")
# preload the map defined by environment variable UEMAP_PATH
# this is done to prevent rendering issues due to unfinished loading
if map_path:
    unreal.log_warning(f"preloading map {map_path}")
    unreal.get_editor_subsystem(unreal.LevelEditorSubsystem).load_level(map_path)
unreal.log_warning("custom_unreal_prescript finished")
|
# -*- coding: utf-8 -*-
import unreal
import sys
import pydoc
import inspect
import os
import json
from enum import IntFlag
class Singleton(type):
    """Metaclass implementing the singleton pattern.

    The first instantiation of a class using this metaclass is cached in
    ``_instances``; every later call returns that same object.
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # EAFP: reuse the cached instance, or build and cache it on first use
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance

    def has_instance(cls):
        """True when the class has already been instantiated."""
        return cls in cls._instances

    def get_instance(cls):
        """Return the cached instance, or None when none exists yet."""
        return cls._instances.get(cls)
def cast(object_to_cast, object_class):
    """Safely call ``object_class.cast(object_to_cast)``.

    :return: the cast result, or None when the cast fails for any reason.

    Fix: the original bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; narrowed to ``Exception``.
    """
    try:
        return object_class.cast(object_to_cast)
    except Exception:
        return None
# short cut for print dir
def d(obj, subString=''):
    """Print every name in dir(obj) whose lowercase form contains *subString*.

    An empty filter prints the full dir() listing.
    """
    needle = subString.lower()
    for name in dir(obj):
        if not needle or needle in name.lower():
            print(name)
def l(obj, subString='', bPrint = True):
    '''
    Print a detailed report of *obj* — its builtin callables, other callables,
    plain attributes and Unreal "Editor Properties" — optionally filtered by a
    substring, then return the collected data.

    :param obj: an ``unreal.Object`` instance or a class
    :param subString: case-insensitive substring filter; '' keeps everything
    :param bPrint: when True, print the formatted report line by line
    :return: tuple (otherCallables, builtinCallables, properties,
             editorProperties), or None when *obj* is None or a module
    '''
    def _simplifyDoc(content):
        # Split a builtin's __doc__ of the form "name(args) -> ret -- details"
        # into the text before the detail marker and the raw parameter string.
        bracketS, bracketE = content.find('('), content.find(')')
        arrow = content.find('->')
        funcDocPos = len(content)
        endSign = ['--', '\n', '\r']
        for s in endSign:
            p = content.find(s)
            if p != -1 and p < funcDocPos:
                funcDocPos = p
        funcDoc = content[:funcDocPos]
        param = content[bracketS + 1: bracketE].strip()
        return funcDoc, param
    def _getEditorProperties(content, obj):
        # Parse the "**Editor Properties:**" section of an unreal class
        # __doc__ into (name, type, rw-flags, description) tuples.
        lines = content.split('\r')
        signFound = False
        allInfoFound = False
        result = []
        for line in lines:
            if not signFound and '**Editor Properties:**' in line:
                signFound = True
            if signFound:
                #todo re
                nameS, nameE = line.find('``') + 2, line.find('`` ')
                if nameS == -1 or nameE == -1:
                    continue
                typeS, typeE = line.find('(') + 1, line.find(')')
                if typeS == -1 or typeE == -1:
                    continue
                rwS, rwE = line.find('[') + 1, line.find(']')
                if rwS == -1 or rwE == -1:
                    continue
                name = line[nameS: nameE]
                type = line[typeS: typeE]
                rws = line[rwS: rwE]
                descript = line[rwE + 2:]
                allInfoFound = True
                result.append((name, type, rws, descript))
        if signFound:
            if not allInfoFound:
                unreal.log_warning("not all info found {}".format(obj))
        else:
            unreal.log_warning("can't find editor properties in {}".format(obj))
        return result
    if obj == None:
        unreal.log_warning("obj == None")
        return None
    if inspect.ismodule(obj):
        return None
    ignoreList = ['__delattr__', '__getattribute__', '__hash__', '__init__', '__setattr__']
    propertiesNames = []
    builtinCallableNames = []
    otherCallableNames = []
    # Bucket every matching attribute into builtin callables, other
    # callables, or plain properties.
    # NOTE(review): subString is not lowercased here (unlike d()); pass a
    # lowercase filter string — confirm intended.
    for x in dir(obj):
        if subString == '' or subString in x.lower():
            attr = getattr(obj, x)
            if callable(attr):
                if inspect.isbuiltin(attr): # or inspect.isfunction(attr) or inspect.ismethod(attr):
                    builtinCallableNames.append(x)
                else:
                    # Not Built-in
                    otherCallableNames.append(x)
            else:
                # Properties
                propertiesNames.append(x)
    # 1 otherCallables
    otherCallables = []
    for name in otherCallableNames:
        descriptionStr = ""
        if name == "__doc__":
            resultStr = "ignored.." # __doc__ is too long; don't print it
        else:
            resultStr = "{}".format(getattr(obj, name))
        otherCallables.append([name, (), descriptionStr, resultStr])
    # 2 builtinCallables
    builtinCallables = []
    for name in builtinCallableNames:
        attr = getattr(obj, name)
        descriptionStr = ""
        resultStr = ""
        bHasParameter = False
        if hasattr(attr, '__doc__'):
            docForDisplay, paramStr = _simplifyDoc(attr.__doc__)
            if paramStr == '':
                # Method with No params
                descriptionStr = docForDisplay[docForDisplay.find(')') + 1:]
                if '-> None' not in docForDisplay:
                    resultStr = "{}".format(attr.__call__())
                else:
                    resultStr = 'skip call'
            else:
                # callable takes parameters: record them instead of calling it
                descriptionStr = paramStr
                bHasParameter = True
                resultStr = ""
        else:
            pass
        builtinCallables.append([name, (bHasParameter,), descriptionStr, resultStr])
    # 3 properties
    editorPropertiesInfos = []
    editorPropertiesNames = []
    if hasattr(obj, '__doc__') and isinstance(obj, unreal.Object):
        editorPropertiesInfos = _getEditorProperties(obj.__doc__, obj)
        for name, _, _, _ in editorPropertiesInfos:
            editorPropertiesNames.append(name)
    properties = []
    for name in propertiesNames:
        descriptionStr = ""
        if name == "__doc__":
            resultStr = "ignored.." # __doc__ is too long; don't print it
        else:
            try:
                resultStr = "{}".format(getattr(obj, name))
            except:
                resultStr = ""
        isAlsoEditorProperty = name in editorPropertiesNames # also exposed as an Editor Property
        properties.append([name, (isAlsoEditorProperty,), descriptionStr, resultStr])
    # 4 editorProperties
    editorProperties = []
    propertyAlsoEditorPropertyCount = 0
    for info in editorPropertiesInfos:
        name, type, rw, descriptionStr = info
        if subString == '' or subString in name.lower(): # drop names that don't match the filter
            try:
                value = eval('obj.get_editor_property("{}")'.format(name))
            except:
                value = ""
            descriptionStr = "[{}]".format(rw)
            resultStr = "{}".format(value)
            isAlsoProperty = name in propertiesNames
            if isAlsoProperty:
                propertyAlsoEditorPropertyCount += 1
            editorProperties.append( [name, (isAlsoProperty,), descriptionStr, resultStr])
    # Assemble the aligned report lines (formatWidth controls the value column)
    strs = []
    strs.append("Detail: {}".format(obj))
    formatWidth = 70
    for info in otherCallables:
        name, flags, descriptionStr, resultStr = info
        # line = "\t{} {}{}{}".format(name, descriptionStr, " " *(formatWidth -1 - len(name) - len(descriptionStr)), resultStr)
        line = "\t{} {}".format(name, descriptionStr)
        line += "{}{}".format(" " * (formatWidth-len(line)+1-4), resultStr)
        strs.append(line)
    for info in builtinCallables:
        name, flags, descriptionStr, resultStr = info
        if flags[0]: # callable with parameters
            # line = "\t{}({}) {} {}".format(name, descriptionStr, " " * (formatWidth - 5 - len(name) - len(descriptionStr)), resultStr)
            line = "\t{}({})".format(name, descriptionStr)
            line += "{}{}".format(" " * (formatWidth-len(line)+1-4), resultStr)
        else:
            # line = "\t{}() {} |{}| {}".format(name, descriptionStr, "-" * (formatWidth - 7 - len(name) - len(descriptionStr)), resultStr)
            line = "\t{}() {}".format(name, descriptionStr)
            line += "|{}| {}".format("-" * (formatWidth-len(line)+1-4-3), resultStr)
        strs.append(line)
    for info in properties:
        name, flags, descriptionStr, resultStr = info
        sign = "**" if flags[0] else ""
        # line = "\t\t{} {} {}{}{}".format(name, sign, descriptionStr, " " * (formatWidth - 6 - len(name) -len(sign) - len(descriptionStr)), resultStr)
        line = "\t\t{} {} {}".format(name, sign, descriptionStr)
        line += "{}{}".format(" " * (formatWidth-len(line)+2-8), resultStr)
        strs.append(line)
    strs.append("Special Editor Properties:")
    for info in editorProperties:
        name, flags, descriptionStr, resultStr = info
        if flags[0]:
            pass # already printed in the properties section above; skip
        else:
            sign = "*"
            # line = "\t\t{0} {1} {3}{4} {2}".format(name, sign, descriptionStr, " " * (formatWidth - 3 - len(name) -len(sign) ), resultStr) # descriptionStr holds the [rw] tag; show it last
            line = "\t\t{} {}".format(name, sign)
            line += "{}{} {}".format(" " * (formatWidth-len(line)+2-8), resultStr, descriptionStr) # descriptionStr holds the [rw] tag; show it last
        strs.append(line)
    if bPrint:
        for l in strs: # NOTE(review): loop variable shadows the function name 'l'
            print(l)
        print("'*':Editor Property, '**':Editor Property also object attribute.")
        print("{}: matched, builtinCallable: {} otherCallables: {} prop: {} EditorProps: {} both: {}".format(obj
            , len(builtinCallables), len(otherCallables), len(properties), len(editorProperties), propertyAlsoEditorPropertyCount))
    return otherCallables, builtinCallables, properties, editorProperties
# short cut for print type
def t(obj):
    """Debugging shortcut: print the concrete type of *obj*."""
    obj_type = type(obj)
    print(obj_type)
# unreal type to Python dict
def ToJson(v):
    """Recursively convert unreal.Transform / Vector / Quat into plain dicts.

    Unsupported types print an error and yield None.
    """
    tp = type(v)
    if tp == unreal.Vector:
        return {'x': v.x, 'y': v.y, 'z': v.z}
    if tp == unreal.Quat:
        return {'x': v.x, 'y': v.y, 'z': v.z, 'w': v.w}
    if tp == unreal.Transform:
        return {
            'translation': ToJson(v.translation),
            'rotation': ToJson(v.rotation),
            'scale3d': ToJson(v.scale3d),
        }
    print("Error type: " + str(tp) + " not implemented.")
    return None
def get_selected_comps():
    """Return the list of components currently selected in the editor."""
    return unreal.PythonBPLib.get_selected_components()
def get_selected_comp():
    """Return the first selected component, or None when nothing is selected."""
    selected = unreal.PythonBPLib.get_selected_components()
    return selected[0] if selected else None
def get_selected_asset():
    """Load and return the first selected content-browser asset, or None.

    Fix: the original queried the selection twice (once for the truthiness
    test, again for the index), which is redundant and could race with a
    changing selection; now it queries once.
    """
    paths = unreal.PythonBPLib.get_selected_assets_paths()
    if not paths:
        return None
    return unreal.load_asset(paths[0])
def get_selected_assets():
    """Load every selected content-browser asset, skipping paths that fail to load.

    :return: list of successfully loaded asset objects.
    """
    paths = unreal.PythonBPLib.get_selected_assets_paths()
    # `is not None` (not `!= None`) and a comprehension replace the manual loop
    return [asset for asset in map(unreal.load_asset, paths) if asset is not None]
def get_selected_actors():
    """Return the actors currently selected in the level editor."""
    actor_subsys = unreal.get_editor_subsystem(unreal.EditorActorSubsystem)
    return actor_subsys.get_selected_level_actors()
def get_selected_actor():
    """Return the first selected level actor, or None when the selection is empty."""
    selected = unreal.get_editor_subsystem(unreal.EditorActorSubsystem).get_selected_level_actors()
    return selected[0] if selected else None
def set_preview_es31():
    """Switch the editor preview platform to Android OpenGL ES 3.1."""
    unreal.PythonBPLib.set_preview_platform("GLSL_ES3_1_ANDROID", "ES3_1")
def set_preview_sm5():
    """Restore the editor preview platform to default Shader Model 5."""
    unreal.PythonBPLib.set_preview_platform("", "SM5")
# todo: create export tools for create help/dir to file
def export_dir(filepath, cls):
    """Write the sorted ``dir(cls)`` listing to *filepath*, one name per line.

    Fix: the original reassigned ``sys.stdout`` and restored it manually, so an
    exception while printing would leave stdout broken and the file unclosed.
    ``redirect_stdout`` + ``with open`` guarantee both are restored/closed.
    """
    import contextlib
    with open(filepath, 'w') as f, contextlib.redirect_stdout(f):
        for x in sorted(dir(cls)):
            print(x)
def export_help(filepath, cls):
    """Write the full ``pydoc.help`` text for *cls* to *filepath*.

    Fix: same stdout-swap defect as export_dir — an exception during help()
    would leave ``sys.stdout`` pointing at a closed file. A context manager
    restores stdout and closes the file unconditionally.
    """
    import contextlib
    with open(filepath, 'w') as f, contextlib.redirect_stdout(f):
        pydoc.help(cls)
# ไฟฎๆนsite.py ๆไปถไธญ็Encoding
def set_default_encoding(encodingStr):
pythonPath = os.path.dirname(sys.path[0])
if not os.path.exists(pythonPath):
unreal.PythonBPLib.message_dialog("can't find python folder: {}".format(pythonPath), "Warning")
return
sitePyPath = pythonPath + "/project/.py"
if not os.path.exists(sitePyPath):
unreal.PythonBPLib.message_dialog("can't find site.py: {}".format(sitePyPath), "Warning")
return
#็ฎๅๆฅๆพๅญ็ฌฆไธฒๆฟๆข
with open(sitePyPath, "r") as f:
lines = f.readlines()
startLine = -1
endLine = -1
for i in range(len(lines)):
if startLine == -1 and lines[i][:len('def setencoding():')] == 'def setencoding():':
startLine = i
continue
if endLine == -1 and startLine > -1 and lines[i].startswith('def '):
endLine = i
print("startLine: {} endLine: {}".format(startLine, endLine))
changedLineCount = 0
if -1 < startLine and startLine < endLine:
linePosWithIf = []
for i in range(startLine + 1, endLine):
if lines[i].lstrip().startswith('if '):
linePosWithIf.append(i)
print(lines[i])
if len(linePosWithIf) != 4:
unreal.PythonBPLib.message_dialog("Find pos failed: {}".format(sitePyPath), "Warning")
print(linePosWithIf)
return
lines[linePosWithIf[2]] = lines[linePosWithIf[2]].replace("if 0", "if 1") # ็ฎๅไฟฎๆน็ฌฌไธไธชifๆๅจ่ก็ๅ
ๅฎน
changedLineCount += 1
for i in range(linePosWithIf[2] + 1, linePosWithIf[3]):
line = lines[i]
if "encoding=" in line.replace(" ", ""):
s = line.find('"')
e = line.find('"', s+1)
if s > 0 and e > s:
lines[i] = line[:s+1] + encodingStr + line[e:]
changedLineCount += 1
break
if changedLineCount == 2:
with open(sitePyPath, 'w') as f:
f.writelines(lines)
unreal.PythonBPLib.notification("Success: {}".format(sitePyPath), 0)
currentEncoding = sys.getdefaultencoding()
if currentEncoding == encodingStr:
unreal.PythonBPLib.notification("ๅทฒๅฐdefault encoding่ฎพ็ฝฎไธบ{}".format(currentEncoding), 0)
else:
unreal.PythonBPLib.message_dialog("ๅทฒๅฐdefault encoding่ฎพ็ฝฎไธบ{}๏ผ้่ฆ้ๅฏ็ผ่พๅจไปฅไพฟ็ๆ".format(encodingStr), "Warning")
else:
unreal.PythonBPLib.message_dialog("Find content failed: {}".format(sitePyPath), "Warning")
def get_actors_at_location(location, error_tolerance):
    """Return actors whose location is within *error_tolerance* of *location*.

    NOTE(review): despite the name, this only searches the currently
    *selected* level actors (get_selected_level_actors), not all actors in
    the level — confirm whether that is intended.
    """
    allActors = unreal.get_editor_subsystem(unreal.EditorActorSubsystem).get_selected_level_actors()
    result = [_actor for _actor in allActors if _actor.get_actor_location().is_near_equal(location, error_tolerance)]
    return result
def select_actors_at_location(location, error_tolerance, actorTypes=None):
    """Select the actors sharing *location* (within tolerance) and return them.

    Requires more than one match (the probe actor itself always matches), so a
    single hit is treated as "no duplicates". *actorTypes* optionally restricts
    the selection to exact types.
    """
    matches = get_actors_at_location(location, error_tolerance)
    if len(matches) <= 1:
        print("None actor with the same locations.")
        return []
    print("Total {} actor(s) with the same locations.".format(len(matches)))
    if actorTypes is not None:
        matches = [candidate for candidate in matches if type(candidate) in actorTypes]
    unreal.get_editor_subsystem(unreal.EditorActorSubsystem).set_selected_level_actors(matches)
    return matches
def select_actors_with_same_location(actor, error_tolerance):
    """Select all StaticMesh/SkeletalMesh actors overlapping *actor*'s location."""
    if actor is None:
        print("actor is None.")
        return []
    return select_actors_at_location(
        actor.get_actor_location(),
        error_tolerance,
        [unreal.StaticMeshActor, unreal.SkeletalMeshActor],
    )
def get_chameleon_tool_instance(json_name):
    """Find the single Chameleon tool instance whose jsonPath ends with *json_name*.

    :return: the instance when exactly one matches, otherwise None (a warning
             is logged when several match).

    Fix: the original iterated ``globals()``, which yields variable *names*
    (strings), so ``hasattr(var, "jsonPath")`` was never true and the lookup
    always failed. We must iterate ``globals().values()``.
    """
    matches = []
    for candidate in list(globals().values()):
        if hasattr(candidate, "jsonPath") and hasattr(candidate, "data"):
            if isinstance(candidate.data, unreal.ChameleonData):
                if candidate.jsonPath.endswith(json_name):
                    matches.append(candidate)
    if len(matches) == 1:
        return matches[0]
    if len(matches) > 1:
        unreal.log_warning(f"Found Multi-ToolsInstance by name: {json_name}, count: {len(matches)}")
    return None
#
# Flags describing an object instance
#
class EObjectFlags(IntFlag):
    """Mirror of Unreal's EObjectFlags bitmask for use from Python.

    Fix: every member previously ended with a stray trailing comma, making each
    value a 1-tuple that only produced the right int via Enum's tuple-unpacking
    of constructor arguments; the commas are removed so values are plain ints.
    """
    # Do not add new flags unless they truly belong here. There are alternatives.
    # if you change any the bit of any of the RF_Load flags, then you will need legacy serialization
    RF_NoFlags = 0x00000000  #< No flags, used to avoid a cast
    # This first group of flags mostly has to do with what kind of object it is. Other than transient, these are the persistent object flags.
    # The garbage collector also tends to look at these.
    RF_Public = 0x00000001  #< Object is visible outside its package.
    RF_Standalone = 0x00000002  #< Keep object around for editing even if unreferenced.
    RF_MarkAsNative = 0x00000004  #< Object (UField) will be marked as native on construction (DO NOT USE THIS FLAG in HasAnyFlags() etc)
    RF_Transactional = 0x00000008  #< Object is transactional.
    RF_ClassDefaultObject = 0x00000010  #< This object is its class's default object
    RF_ArchetypeObject = 0x00000020  #< This object is a template for another object - treat like a class default object
    RF_Transient = 0x00000040  #< Don't save object.
    # This group of flags is primarily concerned with garbage collection.
    RF_MarkAsRootSet = 0x00000080  #< Object will be marked as root set on construction and not be garbage collected, even if unreferenced (DO NOT USE THIS FLAG in HasAnyFlags() etc)
    RF_TagGarbageTemp = 0x00000100  #< This is a temp user flag for various utilities that need to use the garbage collector. The garbage collector itself does not interpret it.
    # The group of flags tracks the stages of the lifetime of a uobject
    RF_NeedInitialization = 0x00000200  #< This object has not completed its initialization process. Cleared when ~FObjectInitializer completes
    RF_NeedLoad = 0x00000400  #< During load, indicates object needs loading.
    RF_KeepForCooker = 0x00000800  #< Keep this object during garbage collection because it's still being used by the cooker
    RF_NeedPostLoad = 0x00001000  #< Object needs to be postloaded.
    RF_NeedPostLoadSubobjects = 0x00002000  #< During load, indicates that the object still needs to instance subobjects and fixup serialized component references
    RF_NewerVersionExists = 0x00004000  #< Object has been consigned to oblivion due to its owner package being reloaded, and a newer version currently exists
    RF_BeginDestroyed = 0x00008000  #< BeginDestroy has been called on the object.
    RF_FinishDestroyed = 0x00010000  #< FinishDestroy has been called on the object.
    # Misc. Flags
    RF_BeingRegenerated = 0x00020000  #< Flagged on UObjects that are used to create UClasses (e.g. Blueprints) while they are regenerating their UClass on load (See FLinkerLoad::CreateExport()), as well as UClass objects in the midst of being created
    RF_DefaultSubObject = 0x00040000  #< Flagged on subobjects that are defaults
    RF_WasLoaded = 0x00080000  #< Flagged on UObjects that were loaded
    RF_TextExportTransient = 0x00100000  #< Do not export object to text form (e.g. copy/paste). Generally used for sub-objects that can be regenerated from data in their parent object.
    RF_LoadCompleted = 0x00200000  #< Object has been completely serialized by linkerload at least once. DO NOT USE THIS FLAG, It should be replaced with RF_WasLoaded.
    RF_InheritableComponentTemplate = 0x00400000  #< Archetype of the object can be in its super class
    RF_DuplicateTransient = 0x00800000  #< Object should not be included in any type of duplication (copy/paste, binary duplication, etc.)
    RF_StrongRefOnFrame = 0x01000000  #< References to this object from persistent function frame are handled as strong ones.
    RF_NonPIEDuplicateTransient = 0x02000000  #< Object should not be included for duplication unless it's being duplicated for a PIE session
    RF_Dynamic = 0x04000000  #< Field Only. Dynamic field. UE_DEPRECATED(5.0, "RF_Dynamic should no longer be used. It is no longer being set by engine code.") - doesn't get constructed during static initialization, can be constructed multiple times # @todo: BP2CPP_remove
    RF_WillBeLoaded = 0x08000000  #< This object was constructed during load and will be loaded shortly
    RF_HasExternalPackage = 0x10000000  #< This object has an external package assigned and should look it up when getting the outermost package
    # RF_Garbage and RF_PendingKill are mirrored in EInternalObjectFlags because checking the internal flags is much faster for the Garbage Collector
    # while checking the object flags is much faster outside of it where the Object pointer is already available and most likely cached.
    RF_PendingKill = 0x20000000  #< Objects that are pending destruction (invalid for gameplay but valid objects). UE_DEPRECATED(5.0) This flag is mirrored in EInternalObjectFlags as PendingKill for performance
    RF_Garbage = 0x40000000  #< Garbage from logical point of view and should not be referenced. UE_DEPRECATED(5.0) This flag is mirrored in EInternalObjectFlags as Garbage for performance
    RF_AllocatedInSharedPage = 0x80000000  #< Allocated from a ref-counted page shared with other UObjects
class EMaterialValueType(IntFlag):
    """Mirror of Unreal's EMaterialValueType bitmask.

    Fix: the first four members carried stray trailing commas (1-tuple values
    relying on Enum tuple-unpacking); removed for consistency with the rest.
    """
    MCT_Float1 = 1
    MCT_Float2 = 2
    MCT_Float3 = 4
    MCT_Float4 = 8
    MCT_Texture2D = 1 << 4
    MCT_TextureCube = 1 << 5
    MCT_Texture2DArray = 1 << 6
    MCT_TextureCubeArray = 1 << 7
    MCT_VolumeTexture = 1 << 8
    MCT_StaticBool = 1 << 9
    MCT_Unknown = 1 << 10
    MCT_MaterialAttributes = 1 << 11
    MCT_TextureExternal = 1 << 12
    MCT_TextureVirtual = 1 << 13
    MCT_VTPageTableResult = 1 << 14
    MCT_ShadingModel = 1 << 15
    MCT_Strata = 1 << 16
    MCT_LWCScalar = 1 << 17
    MCT_LWCVector2 = 1 << 18
    MCT_LWCVector3 = 1 << 19
    MCT_LWCVector4 = 1 << 20
    MCT_Execution = 1 << 21
    MCT_VoidStatement = 1 << 22
def guess_instance_name(json_file_path):
    """Print the variable name a Chameleon tool json assigns its instance to.

    Reads the tool json's "InitPyCmd", finds the statement containing
    "%jsonpath" and prints the left-hand-side identifier of that assignment.

    Fixes: an uncaught StopIteration when no statement matches, and invalid
    JSON now reports a failure instead of propagating json.JSONDecodeError.
    """
    try:
        with open(json_file_path, 'r', encoding='utf-8') as f:
            py_cmd = json.load(f).get("InitPyCmd", "")
        # default=None avoids an uncaught StopIteration when nothing matches
        name = next((line[:line.find('=')].strip()
                     for line in py_cmd.split(";") if "%jsonpath" in line.lower()), None)
        if name is None:
            print("guess_instance_name failed: no '%jsonpath' statement in InitPyCmd")
        else:
            print(name)
    except (FileNotFoundError, KeyError, json.JSONDecodeError) as e:
        print(f"guess_instance_name failed: {e}")
|
import unreal
def rename_assets(search_pattern, replace_pattern, use_case):
    """Rename every selected asset whose name contains *search_pattern*.

    :param search_pattern: substring to look for in each asset name
    :param replace_pattern: replacement text
    :param use_case: True for case-sensitive matching/replacement
    """
    system_lib = unreal.SystemLibrary()
    editor_util = unreal.EditorUtilityLibrary()
    string_lib = unreal.StringLibrary()

    selected_assets = editor_util.get_selected_assets()
    num_assets = len(selected_assets)
    unreal.log(f"Selected {num_assets} assets")

    # resolve the search case once instead of per asset
    search_case = (unreal.SearchCase.CASE_SENSITIVE if use_case
                   else unreal.SearchCase.IGNORE_CASE)
    replaced = 0
    for asset in selected_assets:
        asset_name = system_lib.get_object_name(asset)
        if not string_lib.contains(asset_name, search_pattern, use_case=use_case):
            unreal.log(f"No '{search_pattern}' found in '{asset_name}'")
            continue
        replaced_name = string_lib.replace(asset_name, search_pattern, replace_pattern, search_case)
        editor_util.rename_asset(asset, replaced_name)
        replaced += 1
        unreal.log(f"Renamed '{asset_name}' to '{replaced_name}'")
    unreal.log(f"Replaced {replaced} of {num_assets} assets")
rename_assets("material", "Mat", True)
|
from langchain_community.llms import LlamaCpp
from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
from langchain.prompts import ChatPromptTemplate
from llm_manager import LLMManager
import json
import unreal
class GameDimensionGenerator:
    """Handles querying the LLM to generate Minesweeper dimensions."""
    def __init__(self):
        # Model file is resolved by the project-local LLMManager
        self.llm_manager = LLMManager()
        self.model_path = self.llm_manager.get_model_path()
        # NOTE(review): n_gpu_layers/n_batch/n_ctx are tuning values for this
        # particular model — confirm against the deployed hardware
        self.llm = LlamaCpp(
            model_path=self.model_path,
            n_gpu_layers=1,
            n_batch=512,
            n_ctx=2048,
            f16_kv=True,
            callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
            verbose=True,
        )
        # Define JSON response schema
        self.response_schemas = [
            ResponseSchema(name="rows", description="Number of rows in the grid."),
            ResponseSchema(name="columns", description="Number of columns in the grid."),
            ResponseSchema(name="mines", description="Number of mines."),
        ]
        self.output_parser = StructuredOutputParser.from_response_schemas(self.response_schemas)
        self.format_instructions = self.output_parser.get_format_instructions()
        # Prompt template: format_instructions are baked in via the f-string;
        # {{input}} stays a template placeholder for the user query
        self.custom_prompt = ChatPromptTemplate.from_template(
            f"""You are a game dimension generator for Minesweeper.
            Extract grid dimensions and mine count.
            Return only valid JSON with keys: "rows", "columns", "mines".
            {self.format_instructions}
            User request: {{input}}
            Answer:"""
        )
        self.chain = LLMChain(llm=self.llm, prompt=self.custom_prompt)
    def generate_dimensions(self, query: str) -> dict:
        """Queries the LLM and returns parsed JSON.

        :param query: natural-language description of the desired game
        :return: dict with rows/columns/mines, or {} when parsing fails
        """
        raw_output = self.chain.run({"input": query})
        try:
            parsed = self.output_parser.parse(raw_output)
            return parsed
        except Exception as e:
            # parsing failures are logged and reported as an empty result
            unreal.log_error(f"Parsing failed: {e}")
            return {}
# manual smoke test when executed directly inside the editor
if __name__ == "__main__":
    generator = GameDimensionGenerator()
    print(json.dumps(generator.generate_dimensions("Create an expert level Minesweeper game"), indent=4))
|
"""
This script describes the basic usage of the USD Stage Actor and USD Stage Editor.
"""
import os
import unreal
from pxr import Usd, UsdUtils, UsdGeom, Gf
# Paths to the sample stage and the content-browser import destination
ROOT_LAYER_FILENAME = r"/project/.usda"
DESTINATION_CONTENT_PATH = r"/project/"
# Stage Actors can be spawned on the UE level like this.
# If the Stage Editor is open when this happens, it will automatically attach to the last actor spawned.
editor_actor_subsystem = unreal.get_editor_subsystem(unreal.EditorActorSubsystem)
stage_actor = editor_actor_subsystem.spawn_actor_from_class(unreal.UsdStageActor, unreal.Vector())
# You can open a stage and interact with it by manipulating the Stage Actor directly:
stage_actor.set_editor_property('root_layer', unreal.FilePath(ROOT_LAYER_FILENAME))
# Any stage opened by a Stage Actor will be opened within the UsdUtils' stage cache.
# This means you can retrieve the same stage via Python from it:
stage = None
for s in UsdUtils.StageCache.Get().GetAllStages():
    if s.GetRootLayer().GetDisplayName() == os.path.basename(ROOT_LAYER_FILENAME):
        stage = s
        break
# Interacting with the stage via Python will update the UE level automatically:
# (here: translate the /Cup prim by (100, 100, 100))
prim = stage.GetPrimAtPath("/Cup")
xform_api = UsdGeom.XformCommonAPI(prim)
trans, _, _, _, _ = xform_api.GetXformVectors(Usd.TimeCode.Default())
delta = Gf.Vec3d(100, 100, 100)
xform_api.SetTranslate(trans + delta)
# You can also toggle variants in this way, and both the USD Stage Editor and the UE level will update automatically:
prim = stage.GetPrimAtPath("/PrimWithALODVariantSet")
variant_sets = prim.GetVariantSets()
if variant_sets.HasVariantSet("LOD"):
    var_set = variant_sets.GetVariantSet("LOD")
    for var in var_set.GetVariantNames():
        print(f"Setting variant set '{var_set.GetName()}' with variant '{var}'")
        var_set.SetVariantSelection(var)
# Alternatively it is possible to interact with the USD Stage Editor via Python.
# Here are some examples of operations you can do. Please consult the documentation for the full list
unreal.UsdStageEditorLibrary.open_stage_editor()
unreal.UsdStageEditorLibrary.file_open(ROOT_LAYER_FILENAME)
other_stage_actor = unreal.UsdStageEditorLibrary.get_attached_stage_actor()
unreal.UsdStageEditorLibrary.set_attached_stage_actor(stage_actor)
selected_prims = unreal.UsdStageEditorLibrary.get_selected_prim_paths()
# You can even trigger the Actions->Import button from here, as a way of importing
# the current state of the opened stage:
options = unreal.UsdStageImportOptions()
options.import_actors = True
options.import_geometry = True
options.import_skeletal_animations = True
options.import_level_sequences = True
options.import_materials = True
options.prim_path_folder_structure = False
options.prims_to_import = [
    "/ParentA",
    "/project/"
]
unreal.UsdStageEditorLibrary.actions_import(DESTINATION_CONTENT_PATH, options)
|
# /project/
# @CBgameDev Optimisation Script - Log Redirects
# /project/
import unreal
import os
EditAssetLib = unreal.EditorAssetLibrary()
workingPath = "/Game/" # Using the root directory
# Report file is written next to this script
# NOTE(review): the doubled slash is tolerated on Windows — consider os.path.join
notepadFilePath = os.path.dirname(__file__) + "//PythonOptimiseLog.txt"
allAssets = EditAssetLib.list_assets(workingPath, True, False)
selectedAssetsPath = workingPath
LogStringsArray = []  # one human-readable report line per redirector found
numOfOptimisations = 0  # number of ObjectRedirectors found
# Scan every asset under workingPath and record each ObjectRedirector found,
# behind a cancellable progress dialog.
with unreal.ScopedSlowTask(len(allAssets), selectedAssetsPath) as ST:
    ST.make_dialog(True)
    for asset in allAssets:
        _assetData = EditAssetLib.find_asset_data(asset)
        _assetName = _assetData.get_asset().get_name()
        _assetPathName = _assetData.get_asset().get_path_name()
        _assetClassName = _assetData.get_asset().get_class().get_name()
        if _assetClassName == "ObjectRedirector":
            LogStringsArray.append(" %s ------------> At Path: %s \n" % (_assetName, _assetPathName))
            # unreal.log("Asset Name: %s Path: %s \n" % (_assetName, _assetPathName))
            numOfOptimisations += 1
        if ST.should_cancel():
            break
        ST.enter_progress_frame(1, asset)
# Write results into a log file
# /project/
TitleOfOptimisation = "Log Redirects"
DescOfOptimisation = "Searches entire project for redirects that need fixing up (only logs doesnt perform the fix-up)"
SummaryMessageIntro = "-- Redirects That Could Be Fixed Up --"
if unreal.Paths.file_exists(notepadFilePath): # Check if txt file already exists
    os.remove(notepadFilePath) # if does remove it
# Create new txt file and run INTRO TEXT.
# FIX: use a context manager so the handle is always closed, even if a write raises.
with open(notepadFilePath, "a+") as file:
    file.write("OPTIMISING SCRIPT by @CBgameDev \n")
    file.write("==================================================================================================== \n")
    file.write(" SCRIPT NAME: %s \n" % TitleOfOptimisation)
    file.write(" DESCRIPTION: %s \n" % DescOfOptimisation)
    file.write("==================================================================================================== \n \n")
    if numOfOptimisations <= 0:
        file.write(" -- NONE FOUND -- \n \n")
    else:
        for log_line in LogStringsArray:
            file.write(log_line)
    # Run summary text
    file.write("\n")
    file.write("======================================================================================================= \n")
    file.write(" SUMMARY: \n")
    file.write(" %s \n" % SummaryMessageIntro)
    file.write(" Found: %s \n \n" % numOfOptimisations)
    file.write("======================================================================================================= \n")
    file.write(" Logged to %s \n" % notepadFilePath)
    file.write("======================================================================================================= \n")
os.startfile(notepadFilePath) # Trigger the notepad file to open (NOTE: os.startfile is Windows-only)
|
import unreal
import time
from airsim import *
import airsim
import numpy as np
import os
import ParametersClass as pc
import pandas as pd
import ImageComparisonProcessor as icp
SAMPLING_RATE = 20
#Should possibly change to a path that we decide?
# dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "\SceneImages")
def test_params(thermal_mat_inst):
    """Smoke test: push a single known scalar ("cold_heat" = -2) into the
    thermal material instance."""
    unreal.MaterialEditingLibrary.set_material_instance_scalar_parameter_value(
        thermal_mat_inst, "cold_heat", -2
    )
def set_params(params, test_mode, thermal_mat_inst):
    """Apply a vector of scalar parameters to the thermal material instance.

    Args:
        params: Sequence of scalar values, ordered to match the parameter
            name list of the selected test mode (mode 1 expects 16+ values,
            mode 2 expects 6+ values; extra entries are ignored).
        test_mode: 1 for the full outdoor parameter set, 2 for the reduced
            set. Any other value applies nothing.
        thermal_mat_inst: Material instance whose scalar parameters are set.

    Returns:
        None. Parameters are applied as a side effect via
        unreal.MaterialEditingLibrary.
    """
    # Material parameter names, in the exact order the values appear in
    # `params`. Note the mixed casing ("Brightness", "Fire_Heat") matches the
    # parameter names declared on the material itself.
    if test_mode == 1:
        param_names = [
            "blend_weight",
            "Brightness",
            "Contrast",
            "cold_brightness_multiplier",
            "cold_power",
            "hot_brightness_multiplier",
            "hot_power",
            "sky_heat",
            "Fire_Heat",
            "ground_heat_correction_strength",
            "ground_heat_offset",
            "person_heat_multiplier",
            "target_ground_heat",
            "tree_correction_strength",
            "target_tree_heat",
            "vehicle_heat_multiplier",
        ]
    elif test_mode == 2:
        # NOTE: params[6] (light bulb heat multiplier) is intentionally not
        # applied — the original code never set it ("Unsure about the light
        # bulb multiplier").
        param_names = [
            "blend_weight",
            "Brightness",
            "Contrast",
            "cold_brightness_multiplier",
            "cold_power",
            "hot_brightness_multiplier",
        ]
    else:
        return
    # zip() truncates to the shorter sequence, so extra params are ignored,
    # preserving the original behavior of indexing only the needed entries.
    for name, value in zip(param_names, params):
        unreal.MaterialEditingLibrary.set_material_instance_scalar_parameter_value(
            thermal_mat_inst, name, value)
    return
def capture_image(camera_name, client, best_accuracies, reference_image, df, image_index, params, test_mode, dir):
    """Capture one simulated image, score it against the reference, and log it.

    Args:
        camera_name: AirSim camera name to capture from.
        client: Connected AirSim client.
        best_accuracies: Dict mapping accuracy score -> [image_index, image].
            The lowest-scoring entry is evicted when a better capture arrives.
        reference_image: Ground-truth image passed to the comparison processor.
        df: pandas DataFrame accumulating one row per capture.
        image_index: Sequence number of this capture.
        params: Parameter vector used for this capture (logged in the row).
        test_mode: Test mode id, used for file naming (see ParametersClass).
        dir: Output directory for saved .png files.

    Returns:
        The DataFrame with the new row appended. pd.concat does not mutate in
        place, so callers must use the return value.
    """
    time.sleep(0.1)  # give the simulator a moment to settle before capturing
    responses = client.simGetImages([ImageRequest(camera_name, 10, False, False)])
    print('Retrieved image: %d' % len(responses))
    print ("Saving images to %s" % dir)
    # File naming: "<test name>-<index>", based on test_mode
    filename = pc.test_name(test_mode) + "-" + str(image_index)
    file_path = dir + "\\" + filename
    response = responses[0]
    # Decode the raw byte payload into an H x W x 3 uint8 array.
    # BUGFIX: np.frombuffer replaces the removed/deprecated np.fromstring.
    img1d = np.frombuffer(response.image_data_uint8, dtype=np.uint8)
    img_rgb = img1d.reshape(response.height, response.width, 3)
    # original image is flipped vertically
    img_rgb = np.flipud(img_rgb)
    # BUGFIX: the original read `best_accuracies.keys` (the bound method, no
    # parentheses), which crashes inside np.min. Iterating the dict directly
    # yields its keys.
    min_acc = min(best_accuracies)
    current_acc = icp.check_accuracy(img_rgb, reference_image)
    if current_acc > min_acc:
        # Evict the weakest stored capture in favour of this one.
        del best_accuracies[min_acc]
        best_accuracies[current_acc] = [image_index, img_rgb]
    # Only persist every SAMPLING_RATE-th image to keep disk usage down.
    if image_index % SAMPLING_RATE == 0:
        airsim.write_png(os.path.normpath(file_path + '.png'), img_rgb)
    # Save row to dataframe.
    # BUGFIX: pd.concat returns a new frame; the original discarded it by
    # returning None, losing every logged row.
    row = pc.create_row(test_mode, image_index, current_acc, params, filename)
    df = pd.concat([df, row])
    return df
|
# Copyright Epic Games, Inc. All Rights Reserved
# Built-in
import argparse
import re
from pathlib import Path
from getpass import getuser
from collections import OrderedDict
# Internal
from deadline_service import get_global_deadline_service_instance
from deadline_job import DeadlineJob
from deadline_menus import DeadlineToolBarMenu
from deadline_utils import get_deadline_info_from_preset
# Third Party
import unreal
# Editor Utility Widget path
# NOTE: This is very fragile and can break if naming or pathing changes
EDITOR_UTILITY_WIDGET = "/project/"
def _launch_queue_asset_submitter():
    """
    Callback to execute to launch the queue asset submitter
    """
    unreal.log("Launching queue submitter.")
    # Load the widget asset and hand it to the editor utility subsystem,
    # which spawns it as a registered tab.
    widget_asset = unreal.EditorAssetLibrary.load_asset(EDITOR_UTILITY_WIDGET)
    utility_subsystem = unreal.get_editor_subsystem(unreal.EditorUtilitySubsystem)
    utility_subsystem.spawn_and_register_tab(widget_asset)
def register_menu_action():
    """
    Creates the toolbar menu
    """
    if _validate_euw_asset_exists():
        # Widget exists: wire the submitter callback into the toolbar.
        DeadlineToolBarMenu().register_submenu(
            "SubmitMRQAsset",
            _launch_queue_asset_submitter,
            label_name="Submit Movie Render Queue Asset",
            description="Submits a Movie Render Queue asset to Deadline"
        )
        return
    unreal.log_warning(
        f"EUW `{EDITOR_UTILITY_WIDGET}` does not exist in the Asset registry!"
    )
def _validate_euw_asset_exists():
    """
    Make sure our reference editor utility widget exists in
    the asset registry

    :returns: True if the widget asset is registered, False otherwise
    :rtype: bool
    """
    # NOTE: the original docstring claimed "Array(AssetData) or None", but the
    # function has always returned a bool; the docstring is fixed to match.
    asset_registry = unreal.AssetRegistryHelpers.get_asset_registry()
    asset_data = asset_registry.get_assets_by_package_name(
        EDITOR_UTILITY_WIDGET,
        include_only_on_disk_assets=True
    )
    # Truthiness of the returned container: non-empty -> True.
    return bool(asset_data)
def _execute_submission(args):
    """
    Creates and submits the queue asset as a job to Deadline

    :param args: Commandline args namespace carrying `batch_name`,
        `submission_job_preset`, `remote_job_preset` and `queue_asset`
    """
    unreal.log("Executing job submission")
    job_info, plugin_info = get_deadline_info_from_preset(
        job_preset=unreal.load_asset(args.submission_job_preset)
    )
    # Due to some odd behavior in how Unreal passes string to the argparse,
    # it adds extra quotes to the string, so we will strip the quotes out to get
    # a single string representation.
    batch_name = args.batch_name[0].strip('"')
    # Update the Job Batch Name
    job_info["BatchName"] = batch_name
    # Set the name of the job if one is not set
    if not job_info.get("Name"):
        job_info["Name"] = Path(args.queue_asset).stem
    # Set the Author of the job
    if not job_info.get("UserName"):
        job_info["UserName"] = getuser()
    # Arguments to pass to the executable.
    command_args = []
    # Append all of our inherited command line arguments from the editor.
    in_process_executor_settings = unreal.get_default_object(
        unreal.MoviePipelineInProcessExecutorSettings
    )
    inherited_cmds = in_process_executor_settings.inherited_command_line_arguments
    # Sanitize the commandline by removing any execcmds that may
    # have passed through the commandline.
    # We remove the execcmds because, in some cases, users may execute a
    # script that is local to their editor build for some automated
    # workflow but this is not ideal on the farm. We will expect all
    # custom startup commands for rendering to go through the `Start
    # Command` in the MRQ settings.
    # BUGFIX: raw string. In a normal string "\w" / "\W" are invalid escape
    # sequences (DeprecationWarning today, SyntaxError in future Pythons).
    inherited_cmds = re.sub(
        r".(?P<cmds>-execcmds=[\w\W]+[\'\"])",
        "",
        inherited_cmds
    )
    command_args.extend(inherited_cmds.split(" "))
    command_args.extend(
        in_process_executor_settings.additional_command_line_arguments.split(
            " "
        )
    )
    # Build out custom queue command that will be used to render the queue on
    # the farm.
    queue_cmds = [
        "py",
        "mrq_cli.py",
        "queue",
        str(args.queue_asset),
        "--remote",
        "--cmdline",
        "--batch_name",
        batch_name,
        "--deadline_job_preset",
        str(args.remote_job_preset)
    ]
    command_args.extend(
        [
            "-nohmd",
            "-windowed",
            "-ResX=1280",
            "-ResY=720",
            '-execcmds="{cmds}"'.format(cmds=" ".join(queue_cmds))
        ]
    )
    # Append the commandline args from the deadline plugin info
    command_args.extend(plugin_info.get("CommandLineArguments", "").split(" "))
    # Sanitize the commandline args
    command_args = [arg for arg in command_args if arg not in [None, "", " "]]
    # Remove all duplicates from the command args (OrderedDict preserves the
    # first occurrence of each arg).
    full_cmd_args = " ".join(list(OrderedDict.fromkeys(command_args)))
    # Get the current launched project file
    if unreal.Paths.is_project_file_path_set():
        # NOTE(review): despite the original comment about trimming to
        # "Game.uproject", this resolves to the absolute project file path.
        game_name_or_project_file = (
            unreal.Paths.convert_relative_path_to_full(
                unreal.Paths.get_project_file_path()
            )
        )
    else:
        raise RuntimeError(
            "Failed to get a project name. Please specify a project!"
        )
    if not plugin_info.get("ProjectFile"):
        # BUGFIX: assign the fallback directly. The original re-read the key
        # with .get("ProjectFile", fallback), which would keep a
        # present-but-empty value instead of using the fallback.
        plugin_info["ProjectFile"] = game_name_or_project_file
    # Update the plugin info. "CommandLineMode" tells Deadline to not use an
    # interactive process to execute the job but launch it like a shell
    # command and wait for the process to exit. `--cmdline` in our
    # commandline arguments will tell the editor to shut down when the job is
    # complete
    plugin_info.update(
        {
            "CommandLineArguments": full_cmd_args,
            "CommandLineMode": "true"
        }
    )
    # Create a Deadline job from the selected preset library
    deadline_job = DeadlineJob(job_info, plugin_info)
    deadline_service = get_global_deadline_service_instance()
    # Submit the Deadline Job
    job_id = deadline_service.submit_job(deadline_job)
    unreal.log(f"Deadline job submitted. JobId: {job_id}")
if __name__ == "__main__":
    unreal.log("Executing queue submitter action")
    # Build the commandline interface for the submitter.
    parser = argparse.ArgumentParser(
        description="Submits queue asset to Deadline",
        add_help=False,
    )
    # (flag, options) specs for every accepted argument.
    argument_specs = [
        ("--batch_name", {"type": str, "nargs": '+', "help": "Deadline Batch Name"}),
        ("--submission_job_preset", {"type": str, "help": "Submitter Deadline Job Preset"}),
        ("--remote_job_preset", {"type": str, "help": "Remote Deadline Job Preset"}),
        ("--queue_asset", {"type": str, "help": "Movie Pipeline Queue Asset"}),
    ]
    for flag, options in argument_specs:
        parser.add_argument(flag, **options)
    parser.set_defaults(func=_execute_submission)
    # Parse the arguments and execute the function callback
    arguments = parser.parse_args()
    arguments.func(arguments)
|
"""
Creates a world component in Unreal Control Rig.
"""
# System global imports
# mca python imports
# software specific imports
import unreal
# mca python imports
# Internal module imports
from mca.ue.rigging.controlrig import cr_colorize_controls, cr_make_color, cr_find_item_with_tag
class UECollectionColorize:
    """Spawns the Control Rig graph nodes used to colorize a collection of controls."""

    def __init__(self, control_rig):
        # The Control Rig the nodes will be spawned into.
        self.control_rig = control_rig

    def spawn_nodes(self):
        # Spawn the colorize node at the graph origin.
        #
        # NOTE(review): the three *_size locals below are read from the node
        # modules but never used, and colorize_node is not used either —
        # presumably intended for positioning follow-up nodes. Confirm whether
        # this method is still work in progress before cleaning up.
        colorize_node = cr_colorize_controls.ColorizeControlsNode.spawn(self.control_rig, position=[0.0, 0.0])
        colorize_size = cr_colorize_controls.NODE_SIZE
        find_items_size = cr_find_item_with_tag.NODE_SIZE
        color_size = cr_make_color.NODE_SIZE
|
# from cgl.plugins.unreal import alchemy as alc
import sys
import unreal
from PySide2 import QtWidgets, QtGui, QtUiTools, QtCore
from cgl.plugins.unreal_engine.ui.dialogs import LevelWidget
def run():
    """Show the level dialog inside a (possibly pre-existing) Qt application."""
    # Reuse the running QApplication if one exists; otherwise create one.
    qt_app = QtWidgets.QApplication.instance() or QtWidgets.QApplication(sys.argv)
    dialog = LevelWidget()
    dialog.show()
    qt_app.exec_()
if __name__ == '__main__':
    # Launch the level dialog; run() blocks until the Qt event loop exits.
    run()
    print("IN MAIN")  # reached only after the dialog/application closes
|
# Copyright (c) <2021> Side Effects Software Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The name of Side Effects Software may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY SIDE EFFECTS SOFTWARE "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL SIDE EFFECTS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unreal
class ProcessHDA(object):
    """ An object that wraps async processing of an HDA (instantiating,
    cooking and baking an HDA), with functions that are called at the
    various stages of the process, that can be overridden by subclasses for
    custom functionality:

        - on_failure()
        - on_complete(): upon successful completion (could be PostInstantiation
          if auto cook is disabled, PostProcessing if auto bake is disabled, or
          after PostAutoBake if auto bake is enabled.
        - on_pre_instantiation(): before the HDA is instantiated, a good place
          to set parameter values before the first cook.
        - on_post_instantiation(): after the HDA is instantiated, a good place
          to set/configure inputs before the first cook.
        - on_post_auto_cook(): right after a cook
        - on_pre_process(): after a cook but before output objects have been
          created/processed
        - on_post_processing(): after output objects have been created
        - on_post_auto_bake(): after outputs have been baked

    Instantiate the processor via the constructor and then call the activate()
    function to start the asynchronous process.
    """

    def __init__(
            self,
            houdini_asset,
            instantiate_at=None,
            parameters=None,
            node_inputs=None,
            parameter_inputs=None,
            world_context_object=None,
            spawn_in_level_override=None,
            enable_auto_cook=True,
            enable_auto_bake=False,
            bake_directory_path="",
            bake_method=unreal.HoudiniEngineBakeOption.TO_ACTOR,
            remove_output_after_bake=False,
            recenter_baked_actors=False,
            replace_previous_bake=False,
            delete_instantiated_asset_on_completion_or_failure=False):
        """ Instantiates an HDA in the specified world/level. Sets parameters
        and inputs supplied in ``parameters``, ``node_inputs`` and
        ``parameter_inputs``. If ``enable_auto_cook`` is true, cooks the HDA.
        If ``enable_auto_bake`` is true, bakes the cooked outputs according to
        the supplied baking parameters.

        This all happens asynchronously, with the various events firing at
        the various points in the process:
            - PreInstantiation: before the HDA is instantiated, a good place
              to set parameter values before the first cook (parameter values
              from ``parameters`` are automatically applied at this point)
            - PostInstantiation: after the HDA is instantiated, a good place
              to set/configure inputs before the first cook (inputs from
              ``node_inputs`` and ``parameter_inputs`` are automatically applied
              at this point)
            - PostAutoCook: right after a cook
            - PreProcess: after a cook but before output objects have been
              created/processed
            - PostProcessing: after output objects have been created
            - PostAutoBake: after outputs have been baked
            - Completed: upon successful completion (could be PostInstantiation
              if auto cook is disabled, PostProcessing if auto bake is disabled,
              or after PostAutoBake if auto bake is enabled).
            - Failed: If the process failed at any point.

        Args:
            houdini_asset (HoudiniAsset): The HDA to instantiate.
            instantiate_at (Transform): The Transform to instantiate the HDA with. Defaults to the identity transform.
            parameters (Map(Name, HoudiniParameterTuple)): The parameters to set before cooking the instantiated HDA.
            node_inputs (Map(int32, HoudiniPublicAPIInput)): The node inputs to set before cooking the instantiated HDA.
            parameter_inputs (Map(Name, HoudiniPublicAPIInput)): The parameter-based inputs to set before cooking the instantiated HDA.
            world_context_object (Object): A world context object for identifying the world to spawn in, if spawn_in_level_override is null.
            spawn_in_level_override (Level): If not nullptr, then the HoudiniAssetActor is spawned in that level. If both spawn_in_level_override and world_context_object are null, then the actor is spawned in the current editor context world's current level.
            enable_auto_cook (bool): If true (the default) the HDA will cook automatically after instantiation and after parameter, transform and input changes.
            enable_auto_bake (bool): If true, the HDA output is automatically baked after a cook. Defaults to false.
            bake_directory_path (str): The directory to bake to if the bake path is not set via attributes on the HDA output.
            bake_method (HoudiniEngineBakeOption): The bake target (to actor vs blueprint). @see HoudiniEngineBakeOption.
            remove_output_after_bake (bool): If true, HDA temporary outputs are removed after a bake. Defaults to false.
            recenter_baked_actors (bool): Recenter the baked actors to their bounding box center. Defaults to false.
            replace_previous_bake (bool): If true, on every bake replace the previous bake's output (assets + actors) with the new bake's output. Defaults to false.
            delete_instantiated_asset_on_completion_or_failure (bool): If true, deletes the instantiated asset actor on completion or failure. Defaults to false.
        """
        super(ProcessHDA, self).__init__()
        self._houdini_asset = houdini_asset
        # BUGFIX: the original used a mutable default argument
        # (instantiate_at=unreal.Transform()), shared across all calls.
        # Constructing the default per-instance avoids cross-instance aliasing.
        self._instantiate_at = instantiate_at if instantiate_at is not None else unreal.Transform()
        self._parameters = parameters
        self._node_inputs = node_inputs
        self._parameter_inputs = parameter_inputs
        self._world_context_object = world_context_object
        self._spawn_in_level_override = spawn_in_level_override
        self._enable_auto_cook = enable_auto_cook
        self._enable_auto_bake = enable_auto_bake
        self._bake_directory_path = bake_directory_path
        self._bake_method = bake_method
        self._remove_output_after_bake = remove_output_after_bake
        self._recenter_baked_actors = recenter_baked_actors
        self._replace_previous_bake = replace_previous_bake
        self._delete_instantiated_asset_on_completion_or_failure = delete_instantiated_asset_on_completion_or_failure
        self._asset_wrapper = None
        self._cook_success = False
        self._bake_success = False

    @property
    def asset_wrapper(self):
        """ The asset wrapper for the instantiated HDA processed by this node. """
        return self._asset_wrapper

    @property
    def cook_success(self):
        """ True if the last cook was successful. """
        return self._cook_success

    @property
    def bake_success(self):
        """ True if the last bake was successful. """
        return self._bake_success

    @property
    def houdini_asset(self):
        """ The HDA to instantiate. """
        return self._houdini_asset

    @property
    def instantiate_at(self):
        """ The transform the instantiate the asset with. """
        return self._instantiate_at

    @property
    def parameters(self):
        """ The parameters to set on on_pre_instantiation """
        return self._parameters

    @property
    def node_inputs(self):
        """ The node inputs to set on on_post_instantiation """
        return self._node_inputs

    @property
    def parameter_inputs(self):
        """ The object path parameter inputs to set on on_post_instantiation """
        return self._parameter_inputs

    @property
    def world_context_object(self):
        """ The world context object: spawn in this world if spawn_in_level_override is not set. """
        return self._world_context_object

    @property
    def spawn_in_level_override(self):
        """ The level to spawn in. If both this and world_context_object is not set, spawn in the editor context's level. """
        return self._spawn_in_level_override

    @property
    def enable_auto_cook(self):
        """ Whether to set the instantiated asset to auto cook. """
        return self._enable_auto_cook

    @property
    def enable_auto_bake(self):
        """ Whether to set the instantiated asset to auto bake after a cook. """
        return self._enable_auto_bake

    @property
    def bake_directory_path(self):
        """ Set the fallback bake directory, for if output attributes do not specify it. """
        return self._bake_directory_path

    @property
    def bake_method(self):
        """ The bake method/target: for example, to actors vs to blueprints. """
        return self._bake_method

    @property
    def remove_output_after_bake(self):
        """ Remove temporary HDA output after a bake. """
        return self._remove_output_after_bake

    @property
    def recenter_baked_actors(self):
        """ Recenter the baked actors at their bounding box center. """
        return self._recenter_baked_actors

    @property
    def replace_previous_bake(self):
        """ Replace previous bake output on each bake. For the purposes of this
        node, this would mostly apply to .uassets and not actors.
        """
        return self._replace_previous_bake

    @property
    def delete_instantiated_asset_on_completion_or_failure(self):
        """ Whether or not to delete the instantiated asset after Complete is called. """
        return self._delete_instantiated_asset_on_completion_or_failure

    def _delegate_bindings(self):
        """ Return the (delegate, handler) pairs for every wrapper event this
        processor listens to. Requires self._asset_wrapper to be set.

        Keeping the pairs in one place guarantees that bind (activate) and
        unbind (_unbind_delegates) always cover the same set of events.
        """
        wrapper = self._asset_wrapper
        return (
            (wrapper.on_pre_instantiation_delegate, self._handle_on_pre_instantiation),
            (wrapper.on_post_instantiation_delegate, self._handle_on_post_instantiation),
            (wrapper.on_post_cook_delegate, self._handle_on_post_auto_cook),
            (wrapper.on_pre_process_state_exited_delegate, self._handle_on_pre_process),
            (wrapper.on_post_processing_delegate, self._handle_on_post_processing),
            (wrapper.on_post_bake_delegate, self._handle_on_post_auto_bake),
        )

    def activate(self):
        """ Activate the process. This will:
            - instantiate houdini_asset and wrap it as asset_wrapper
            - call on_failure() for any immediate failures
            - otherwise bind to delegates from asset_wrapper so that the
              various self.on_*() functions are called as appropriate

        Returns immediately (does not block until cooking/processing is
        complete).

        Returns:
            (bool): False if activation failed.
        """
        # Get the API instance
        houdini_api = unreal.HoudiniPublicAPIBlueprintLib.get_api()
        if not houdini_api:
            # Handle failures: this will unbind delegates and call on_failure()
            self._handle_on_failure()
            return False
        # Create an empty API asset wrapper
        self._asset_wrapper = unreal.HoudiniPublicAPIAssetWrapper.create_empty_wrapper(houdini_api)
        if not self._asset_wrapper:
            # Handle failures: this will unbind delegates and call on_failure()
            self._handle_on_failure()
            return False
        # Bind to the wrapper's delegates for instantiation, cooking, baking
        # etc events
        for delegate, handler in self._delegate_bindings():
            delegate.add_callable(handler)
        # Begin the instantiation process of houdini_asset and wrap it with
        # self.asset_wrapper
        if not houdini_api.instantiate_asset_with_existing_wrapper(
                self.asset_wrapper,
                self.houdini_asset,
                self.instantiate_at,
                self.world_context_object,
                self.spawn_in_level_override,
                self.enable_auto_cook,
                self.enable_auto_bake,
                self.bake_directory_path,
                self.bake_method,
                self.remove_output_after_bake,
                self.recenter_baked_actors,
                self.replace_previous_bake):
            # Handle failures: this will unbind delegates and call on_failure()
            self._handle_on_failure()
            return False
        return True

    def _unbind_delegates(self):
        """ Unbinds from self.asset_wrapper's delegates (if valid). """
        if not self._asset_wrapper:
            return
        # BUGFIX: the original called add_callable() here, which RE-REGISTERED
        # every handler instead of removing it, so handlers fired multiple
        # times and were never released. remove_callable() actually unbinds.
        for delegate, handler in self._delegate_bindings():
            delegate.remove_callable(handler)

    def _check_wrapper(self, wrapper):
        """ Checks that wrapper matches self.asset_wrapper. Logs a warning if
        it does not.

        Args:
            wrapper (HoudiniPublicAPIAssetWrapper): the wrapper to check
                against self.asset_wrapper

        Returns:
            (bool): True if the wrappers match.
        """
        if wrapper != self._asset_wrapper:
            unreal.log_warning(
                '[UHoudiniPublicAPIProcessHDANode] Received delegate event '
                'from unexpected asset wrapper ({0} vs {1})!'.format(
                    self._asset_wrapper.get_name() if self._asset_wrapper else '',
                    wrapper.get_name() if wrapper else ''
                )
            )
            return False
        return True

    def _handle_on_failure(self):
        """ Handle any failures during the lifecycle of the process. Calls
        self.on_failure() and then unbinds from self.asset_wrapper and
        optionally deletes the instantiated asset.
        """
        self.on_failure()
        self._unbind_delegates()
        if self.delete_instantiated_asset_on_completion_or_failure and self.asset_wrapper:
            self.asset_wrapper.delete_instantiated_asset()

    def _handle_on_complete(self):
        """ Handles completion of the process. This can happen at one of
        three stages:
            - After on_post_instantiate(), if enable_auto_cook is False.
            - After on_post_auto_cook(), if enable_auto_cook is True but
              enable_auto_bake is False.
            - After on_post_auto_bake(), if both enable_auto_cook and
              enable_auto_bake are True.

        Calls self.on_complete() and then unbinds from self.asset_wrapper's
        delegates and optionally deletes the instantiated asset.
        """
        self.on_complete()
        self._unbind_delegates()
        if self.delete_instantiated_asset_on_completion_or_failure and self.asset_wrapper:
            self.asset_wrapper.delete_instantiated_asset()

    def _handle_on_pre_instantiation(self, wrapper):
        """ Called during pre_instantiation. Sets ``parameters`` on the HDA
        and calls self.on_pre_instantiation().
        """
        if not self._check_wrapper(wrapper):
            return
        # Set any parameters specified for the HDA
        if self.asset_wrapper and self.parameters:
            self.asset_wrapper.set_parameter_tuples(self.parameters)
        self.on_pre_instantiation()

    def _handle_on_post_instantiation(self, wrapper):
        """ Called during post_instantiation. Sets inputs (``node_inputs`` and
        ``parameter_inputs``) on the HDA and calls self.on_post_instantiation().
        Completes execution if enable_auto_cook is False.
        """
        if not self._check_wrapper(wrapper):
            return
        # Set any inputs specified when the node was created
        if self.asset_wrapper:
            if self.node_inputs:
                self.asset_wrapper.set_inputs_at_indices(self.node_inputs)
            if self.parameter_inputs:
                self.asset_wrapper.set_input_parameters(self.parameter_inputs)
        self.on_post_instantiation()
        # If not set to auto cook, complete execution now
        if not self.enable_auto_cook:
            self._handle_on_complete()

    def _handle_on_post_auto_cook(self, wrapper, cook_success):
        """ Called during post_cook. Sets self.cook_success and calls
        self.on_post_auto_cook().

        Args:
            cook_success (bool): True if the cook was successful.
        """
        if not self._check_wrapper(wrapper):
            return
        self._cook_success = cook_success
        self.on_post_auto_cook(cook_success)

    def _handle_on_pre_process(self, wrapper):
        """ Called during pre_process. Calls self.on_pre_process().
        """
        if not self._check_wrapper(wrapper):
            return
        self.on_pre_process()

    def _handle_on_post_processing(self, wrapper):
        """ Called during post_processing. Calls self.on_post_processing().
        Completes execution if enable_auto_bake is False.
        """
        if not self._check_wrapper(wrapper):
            return
        self.on_post_processing()
        # If not set to auto bake, complete execution now
        if not self.enable_auto_bake:
            self._handle_on_complete()

    def _handle_on_post_auto_bake(self, wrapper, bake_success):
        """ Called during post_bake. Sets self.bake_success and calls
        self.on_post_auto_bake().

        Args:
            bake_success (bool): True if the bake was successful.
        """
        if not self._check_wrapper(wrapper):
            return
        self._bake_success = bake_success
        self.on_post_auto_bake(bake_success)
        self._handle_on_complete()

    def on_failure(self):
        """ Called if the process fails to instantiate or fails to start
        a cook.

        Subclasses can override this function implement custom functionality.
        """
        pass

    def on_complete(self):
        """ Called if the process completes instantiation, cook and/or baking,
        depending on enable_auto_cook and enable_auto_bake.

        Subclasses can override this function implement custom functionality.
        """
        pass

    def on_pre_instantiation(self):
        """ Called during pre_instantiation.

        Subclasses can override this function implement custom functionality.
        """
        pass

    def on_post_instantiation(self):
        """ Called during post_instantiation.

        Subclasses can override this function implement custom functionality.
        """
        pass

    def on_post_auto_cook(self, cook_success):
        """ Called during post_cook.

        Subclasses can override this function implement custom functionality.

        Args:
            cook_success (bool): True if the cook was successful.
        """
        pass

    def on_pre_process(self):
        """ Called during pre_process.

        Subclasses can override this function implement custom functionality.
        """
        pass

    def on_post_processing(self):
        """ Called during post_processing.

        Subclasses can override this function implement custom functionality.
        """
        pass

    def on_post_auto_bake(self, bake_success):
        """ Called during post_bake.

        Subclasses can override this function implement custom functionality.

        Args:
            bake_success (bool): True if the bake was successful.
        """
        pass
|
# Copyright (c) 2023 Max Planck Society
# License: https://bedlam.is.tuebingen.mpg.de/license.html
#
# Batch import CLO exported Alembic .abc files into Unreal Engine
#
import math
import os
from pathlib import Path
import sys
import time
import unreal
data_root = r"/project/"
#whitelist_subjects_path = r"/project/.txt"
whitelist_subjects_path = None
data_root_unreal = "/project/"
#whitelist_animations_path = r"/project/.txt"
whitelist_animations_path = None
def import_abc(data_root, data_root_unreal, current_batch, num_batches, whitelist_subjects=None, whitelist_animations=None):
    """Batch-import CLO-exported Alembic (.abc) files as GeometryCache assets.

    Args:
        data_root: Filesystem root recursively scanned for .abc files.
        data_root_unreal: Unreal content path under which assets are created.
        current_batch: Zero-based batch index, or None to import all files.
        num_batches: Total batch count (used only when current_batch is set).
        whitelist_subjects: Optional list of subject folder names to import;
            None imports all subjects.
        whitelist_animations: Optional list of animation file stems to import;
            None imports all animations.
    """
    # Build import list (sorted so batch slicing is deterministic across runs)
    import_abc_paths = sorted(Path(data_root).rglob("*.abc"))
    if current_batch is not None:
        # Slice out this batch's contiguous section of the sorted file list.
        section_length = math.ceil(len(import_abc_paths) / num_batches)
        start_index = current_batch * section_length
        end_index = min(start_index + section_length, len(import_abc_paths))
        print(f"Processing section: {current_batch}, total sections: {num_batches}, range: [{start_index}:{end_index}]")
        import_abc_paths = import_abc_paths[start_index : end_index]
    # NOTE: removed the dead `import_tasks = []` initialization — the variable
    # is unconditionally rebuilt per file inside the loop below.
    for import_abc_path in import_abc_paths:
        if whitelist_subjects is not None:
            # Example layout: <subject>/clothing_simulations/0000/0000.abc
            current_subject_name = import_abc_path.parent.parent.parent.name
            if current_subject_name not in whitelist_subjects:
                unreal.log(f"Skipping Alembic. Subject not whitelisted: {import_abc_path}")
                continue
        if whitelist_animations is not None:
            current_animation_name = import_abc_path.stem
            if current_animation_name not in whitelist_animations:
                unreal.log(f"Skipping Alembic. Animation not whitelisted: {import_abc_path}")
                continue
        unreal.log(f'Processing Alembic: {import_abc_path}')
        # unreal.log_flush() # Note: does not update editor console log
        # Example: rp_aaron_posed_002\clothing_simulations\0000\0000.abc
        uasset_folder_name = import_abc_path.parent.parent.parent.name
        uasset_folder = f"{data_root_unreal}/{uasset_folder_name}"
        uasset_name = f"{uasset_folder_name}_{import_abc_path.stem}_clo"
        uasset_path = f"{uasset_folder}/{uasset_name}"
        # Check if file is already imported
        if unreal.EditorAssetLibrary.does_asset_exist(uasset_path):
            unreal.log(" Skipping import. Already imported: " + uasset_path)
        else:
            unreal.log(" Importing: " + uasset_path)
            options = unreal.AbcImportSettings()
            options.import_type = unreal.AlembicImportType.GEOMETRY_CACHE
            # BUG (Unreal 5.0.3):
            # Importing with Python using frame_start=101 and frame_end=0 will lead to invalid last frame in sequence (last frame is same as the start frame).
            # We have to use frame_end=0 value since we don't know the exact number of frames for each file when using Unreal Python API.
            # Importing via Unreal Editor GUI (Frame Start=101, Frame End=0) will not have this issue but will have one additional frame in sequence at end.
            # Clothing simulation contains 100 warmup frames which we need to skip on import
            options.sampling_settings = unreal.AbcSamplingSettings(sampling_type=unreal.AlembicSamplingType.PER_FRAME,
                                                                   frame_steps=1,
                                                                   time_steps=0.0,
                                                                   frame_start=101,
                                                                   frame_end=0,
                                                                   skip_empty=False)
            # Maximum quality settings
            options.geometry_cache_settings = unreal.AbcGeometryCacheSettings(flatten_tracks=True,
                                                                              apply_constant_topology_optimizations=False,
                                                                              motion_vectors=unreal.AbcGeometryCacheMotionVectorsImport.NO_MOTION_VECTORS,
                                                                              optimize_index_buffers=False,
                                                                              compressed_position_precision=0.01,
                                                                              compressed_texture_coordinates_number_of_bits=16)  # default: 10
            # Source Alembic data exported from Blender [m], apply conversion settings to convert to Unreal [cm]
            options.conversion_settings = unreal.AbcConversionSettings(preset=unreal.AbcConversionPreset.CUSTOM, flip_u=False, flip_v=True, scale=[100.0, - 100.0, 100.0], rotation=[90.0, 0.0, 0.0])
            task = unreal.AssetImportTask()
            task.set_editor_property("automated", True)
            task.set_editor_property("filename", str(import_abc_path))
            task.set_editor_property("destination_path", uasset_folder)
            task.set_editor_property("destination_name", uasset_name)
            task.set_editor_property("replace_existing", True)
            task.set_editor_property("save", True)
            task.set_editor_property("options", options)
            # Import one Alembic file at a time and save all imported assets immediately to avoid data loss on Unreal Editor crash
            import_tasks = [task]
            unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks(import_tasks)
            unreal.EditorAssetLibrary.save_directory(data_root_unreal)  # save imported materials and textures
######################################################################
# Main
######################################################################
if __name__ == '__main__':
    unreal.log("============================================================")
    unreal.log("Running: %s" % __file__)

    def _read_whitelist(path):
        """Load a newline-separated whitelist file, or None when no path is set."""
        if path is None:
            return None
        with open(path) as fh:
            return fh.read().splitlines()

    # Optional batching: "<script> <current_batch> <num_batches>"
    current_batch = None
    num_batches = None
    if len(sys.argv) == 3:
        current_batch, num_batches = int(sys.argv[1]), int(sys.argv[2])
    whitelist_subjects = _read_whitelist(whitelist_subjects_path)
    whitelist_animations = _read_whitelist(whitelist_animations_path)
    # Import Alembic files
    start_time = time.perf_counter()
    import_abc(data_root, data_root_unreal, current_batch, num_batches, whitelist_subjects, whitelist_animations)
    print(f"Alembic batch import finished. Total import time: {(time.perf_counter() - start_time):.1f}s")
|
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
import unreal
from Utilities.Utils import Singleton
import random
import re
if sys.platform == "darwin":
import webbrowser
class ChameleonGallery(metaclass=Singleton):
    """Controller for the TAPython Chameleon Gallery sample tool.

    Wraps the ChameleonData handle built from the tool's JSON layout and
    implements one callback per demo widget. Singleton: at most one instance
    per editor session.
    """
    def __init__(self, jsonPath: str):
        # jsonPath identifies this tool's window in ChameleonData queries.
        self.jsonPath = jsonPath
        self.data = unreal.PythonBPLib.get_chameleon_data(self.jsonPath)
        # Widget-name aliases; each must match an ID declared in the JSON layout.
        self.ui_scrollbox = "ScrollBox"
        self.ui_crumbname = "SBreadcrumbTrailA"
        self.ui_image = "SImageA"
        self.ui_image_local = "SImage_ImageFromRelativePath"
        self.ui_imageB = "SImage_ImageFromPath"
        self.ui_progressBar = "ProgressBarA"
        self.ui_drop_target_text_box = "DropResultBox"
        self.ui_python_not_ready = "IsPythonReadyImg"
        self.ui_python_is_ready = "IsPythonReadyImgB"
        self.ui_is_python_ready_text = "IsPythonReadyText"
        self.ui_details_view = "DetailsView"
        self.ui_color_block = "ColorBlock"
        self.ui_button_expand_color_picker = "ButtonExpandColorPicker"
        self.ui_color_picker = "ColorPicker"
        self.ui_dpi_scaler = "DPIScaler"
        # 0/1 toggles used to alternate between the two demo images.
        self.imageFlagA = 0
        self.imageFlagB = 0
        # set data in init
        self.set_random_image_data()
        self.data.set_combo_box_items('CombBoxA', ['1', '3', '5'])
        self.data.set_object(self.ui_details_view, self.data)
        self.is_color_picker_shown = self.data.get_visibility(self.ui_color_picker) == "Visible"
        # Parses "(R=..,G=..,B=..,A=..)" strings emitted by the color picker.
        self.linearColor_re = re.compile(r"\(R=([-\d.]+),G=([-\d.]+),B=([-\d.]+),A=([-\d.]+)\)")
        self.tapython_version = dict(unreal.PythonBPLib.get_ta_python_version())
        print("ChameleonGallery.Init")
    def mark_python_ready(self):
        """Flip the 'Python ready' indicator widgets to the ready state."""
        print("mark_python_ready call")
        self.data.set_visibility(self.ui_python_not_ready, "Collapsed")
        self.data.set_visibility(self.ui_python_is_ready, "Visible")
        self.data.set_text(self.ui_is_python_ready_text, "Python Path Ready.")
    def push_breadcrumb(self):
        """Append the next word of a fixed sentence as a breadcrumb entry."""
        count = self.data.get_breadcrumbs_count_string(self.ui_crumbname)
        strs = "is breadcrumb tail from alice in wonder world"
        # Cycle through the words based on the current breadcrumb count.
        label = strs.split()[count % len(strs.split())]
        self.data.push_breadcrumb_string(self.ui_crumbname, label, label)
    def set_random_image_data(self):
        """Fill the demo image widget with 64x64 random black/white pixels."""
        width = 64
        height = 64
        colors = [unreal.LinearColor(1, 1, 1, 1) if random.randint(0, 1) else unreal.LinearColor(0, 0, 0, 1) for _ in range(width * height)]
        self.data.set_image_pixels(self.ui_image, colors, width, height)
    def set_random_progress_bar_value(self):
        """Set the demo progress bar to a random percentage in [0, 1)."""
        self.data.set_progress_bar_percent(self.ui_progressBar,random.random())
    def change_local_image(self):
        """Toggle the local-image widget between the two bundled logos."""
        self.data.set_image_from(self.ui_image_local, ["Images/ChameleonLogo_c.png", "Images/ChameleonLogo_b.png"][self.imageFlagA])
        self.imageFlagA = (self.imageFlagA + 1) % 2
    def change_image(self):
        """Toggle the path-based image widget between the two bundled icons."""
        self.data.set_image_from_path(self.ui_imageB, ["PythonChameleonIcon_128x.png", "Icon128.png"][self.imageFlagB])
        self.imageFlagB = (self.imageFlagB + 1) % 2
    def change_comboBox_items(self):
        """Repopulate the combo box with a random-length run of numbers."""
        offset = random.randint(1, 10)
        items = [str(v+offset) for v in range(random.randint(1, 10))]
        self.data.set_combo_box_items("CombBoxA", items)
    def launch_other_galleries(self):
        """Open all auto-generated gallery tools (after user confirmation)."""
        # Bail out if the auto-generated gallery JSONs have not been created.
        if not os.path.exists(os.path.join(os.path.dirname(__file__), 'auto_gen/border_brushes_Gallery.json')):
            unreal.PythonBPLib.notification("auto-generated Galleries not exists", info_level=1)
            return
        gallery_paths = ['ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json'
                         ]
        bLaunch = unreal.PythonBPLib.confirm_dialog(f'Open Other {len(gallery_paths)} Galleries? You can close them with the "Close all Gallery" Button' , "Open Other Galleries", with_cancel_button=False)
        if bLaunch:
            # Show progress while each tool window is spawned.
            with unreal.ScopedSlowTask(len(gallery_paths), "Spawn Actors") as slow_task:
                slow_task.make_dialog(True)
                for i, p in enumerate(gallery_paths):
                    slow_task.enter_progress_frame(1, f"Launch Gallery: {p}")
                    unreal.ChameleonData.launch_chameleon_tool(p)
    def request_close_other_galleries(self):
        """Ask every auto-generated gallery window to close."""
        if not os.path.exists(os.path.join(os.path.dirname(__file__), 'auto_gen/border_brushes_Gallery.json')):
            unreal.PythonBPLib.notification("auto-generated Galleries not exists", info_level=1)
            return
        gallery_paths = ['ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json'
                         ]
        for i, p in enumerate(gallery_paths):
            unreal.ChameleonData.request_close(p)
        # unreal.ChameleonData.request_close('/project/.json')
        # NOTE(review): result is never used -- appears to be leftover debugging
        # that collects the Singleton-based tool instances from this module's
        # globals; confirm before removing.
        exists_tools_var = [globals()[x] for x in globals() if "Utilities.Utils.Singleton" in str(type(type(globals()[x])))]
    def on_drop(self, assets, assets_folders, actors):
        """Show dropped assets/folders/actors in the drop-target text box."""
        str_for_show = ""
        for items, name in zip([assets, assets_folders, actors], ["Assets:", "Assets Folders:", "Actors:"]):
            if items:
                str_for_show += f"{name}\n"
                for item in items:
                    str_for_show += f"\t{item}\n"
        self.data.set_text(self.ui_drop_target_text_box, str_for_show)
        print(f"str_for_show: {str_for_show}")
    def on_drop_func(self, *args, **kwargs):
        """Generic drop handler: dump every non-empty kwarg group to the box."""
        print(f"args: {args}")
        print(f"kwargs: {kwargs}")
        str_for_show = ""
        for name, items in kwargs.items():
            if items:
                str_for_show += f"{name}:\n"
                for item in items:
                    str_for_show += f"\t{item}\n"
        self.data.set_text(self.ui_drop_target_text_box, str_for_show)
    def get_full_size_of_this_chameleon(self):
        """Return (width, height) large enough to show the whole scroll box."""
        current_size = unreal.ChameleonData.get_chameleon_window_size(self.jsonPath)
        scrollbox_offsets = self.data.get_scroll_box_offsets(self.ui_scrollbox)
        # Extrapolate full content height from the end offset and the visible
        # fraction reported by the scroll box; +48 for window chrome/margins.
        height_full = scrollbox_offsets["ScrollOffsetOfEnd"] / (1.0-scrollbox_offsets["viewFraction"])
        height_full += 48
        print(f"delta: {height_full} - {round(height_full)}")
        return current_size.x, round(height_full)
    def on_button_ChangeTabSize_click(self, offset_pixel):
        """Widen (or shrink) this tool's window by offset_pixel pixels."""
        current_size = unreal.ChameleonData.get_chameleon_window_size(self.jsonPath)
        print(f"currentSize: {current_size}")
        offsets = self.data.get_scroll_box_offsets(self.ui_scrollbox)
        print(offsets)
        if current_size:
            current_size.x += offset_pixel
            # NOTE(review): hard-coded tool path; elsewhere self.jsonPath is
            # used -- confirm they always refer to the same window.
            unreal.ChameleonData.set_chameleon_window_size("ChameleonGallery/ChameleonGallery.json", current_size)
    def on_button_FlashWindow_click(self):
        """Flash this tool's window to draw the user's attention."""
        unreal.ChameleonData.flash_chameleon_window("ChameleonGallery/ChameleonGallery.json")
    def on_button_Snapshot_click(self):
        """Save a PNG snapshot of the full (unscrolled) tool window."""
        full_size = self.get_full_size_of_this_chameleon()
        print(f"try save snapshot @ {full_size}")
        saved_file_path = unreal.ChameleonData.snapshot_chameleon_window(self.jsonPath, unreal.Vector2D(*full_size))
        if saved_file_path:
            # Notification hyperlink opens the containing folder via explorer().
            unreal.PythonBPLib.notification(f"UI Snapshot Saved:", hyperlink_text = saved_file_path
                                            , on_hyperlink_click_command = f'chameleon_gallery.explorer("{saved_file_path}")')
        else:
            unreal.PythonBPLib.notification(f"Save UI snapshot failed.", info_level = 1)
    def explorer(self, file_path):
        """Open the folder containing file_path in Finder/Explorer."""
        if sys.platform == "darwin":
            webbrowser.open(os.path.dirname(file_path))
        else:
            file_path = file_path.replace("/", "\\")
            subprocess.call('explorer "{}" '.format(os.path.dirname(file_path)))
    def set_selected_actor_to_details_view(self):
        """Bind the first selected level actor to the details-view widget."""
        selected = unreal.get_editor_subsystem(unreal.EditorActorSubsystem).get_selected_level_actors()
        if selected:
            self.data.set_object(self.ui_details_view, selected[0])
        else:
            print("Selected None")
    def on_expand_color_picker_click(self):
        """Toggle color-picker visibility and widen the window if needed."""
        self.data.set_visibility(self.ui_color_picker, "Collapsed" if self.is_color_picker_shown else "Visible")
        self.data.set_text(self.ui_button_expand_color_picker, "Expand ColorPicker" if self.is_color_picker_shown else "Collapse ColorPicker")
        self.is_color_picker_shown = not self.is_color_picker_shown
        current_size = unreal.ChameleonData.get_chameleon_window_size(self.jsonPath)
        # Ensure the window is wide enough for the expanded picker.
        if current_size.x < 650:
            current_size.x = 650
            unreal.ChameleonData.set_chameleon_window_size("ChameleonGallery/ChameleonGallery.json", current_size)
    def on_color_picker_commit(self, color_str):
        """Parse an "(R=..,G=..,B=..,A=..)" string and tint the color block."""
        v = [float(a) for a in self.linearColor_re.match(color_str).groups()]
        self.data.set_color(self.ui_color_block, unreal.LinearColor(*v))
    def change_dpi_scaler_value(self, value):
        """Apply the slider value (+0.5) as the DPI scale; needs TAPython >= 1.2.1."""
        if self.tapython_version["Minor"] < 2 or(
                self.tapython_version["Minor"] == 2 and self.tapython_version["Patch"] < 1
        ):
            print("Need TAPython version >= 1.2.1")
            return
        self.data.set_dpi_scale(self.ui_dpi_scaler, value + 0.5)
|
import unreal
import os
import sys
import subprocess
import json
import time
import importlib.util
# ======================================================================
# Configuration - Modify these values as needed
# ======================================================================
# Set your project paths
WORKSPACE_ROOT = r"C:/project/.croisez/project/"
SCRIPTS_DIR = os.path.join(WORKSPACE_ROOT, "01_Scripts")
# Iteration number for file naming
ITERATION_NUMBER = 5 # Change this as needed
# Forwarded to Houdini as --switch_bool; semantics are defined inside the
# .hip TOP networks -- TODO confirm.
SWITCH_BOOL = 0
# Unreal Engine paths
UE_BASE_PATH = "/project/"
# Path includes class name at the end for compatibility with the PCG graph script
UE_PCG_TEMPLATE_BP_PATH = f"{UE_BASE_PATH}/project/.BP_PCG_HD_TEMPLATE"
UE_SPLINE_BP_PATH = f"{UE_BASE_PATH}/project/"
UE_MESH_TEMPLATE_PATH = f"{UE_BASE_PATH}/project/"
UE_MAT_TEMPLATE_PATH = f"{UE_BASE_PATH}/project/"
# Houdini paths
HOUDINI_INSTALL_PATH = r"C:/Program Files/Side Effects Software/Houdini 20.0.653"
# HIP file for PCG generation
HIP_FILE_PATH = r"C:/project/.croisez/project/.hip"
# HIP file for sidewalks and roads generation
SWR_HIP_FILE_PATH = r"C:/project/.croisez/project/.hip"
# Function to generate the file1 path with the current iteration number (legacy approach)
def get_file1_path(iteration_number):
    """Return the legacy single-FBX export path for this iteration."""
    return "{}/project/{}.fbx".format(WORKSPACE_ROOT, iteration_number)
# Function to generate the fbx folder path with the current iteration number (new approach)
def get_fbx_folder_path(iteration_number):
    """Return the per-iteration FBX export folder (new approach)."""
    return "{}/project/{}".format(WORKSPACE_ROOT, iteration_number)
# Default file1 path (will be set dynamically with the iteration number)
FILE1_PATH = get_file1_path(ITERATION_NUMBER)
# Default fbx folder path (will be set dynamically with the iteration number)
FBX_FOLDER_PATH = get_fbx_folder_path(ITERATION_NUMBER)
# Output directories
SPLINES_OUTPUT_DIR = os.path.join(WORKSPACE_ROOT, "03_GenDatas", "Dependancies", "PCG_HD", "In", "GZ", "Splines")
# Base-path prefix for spline JSON exports; the iteration number and ".json"
# are appended by the consumer.
SPLINES_BASEPATH_OUTPUT_DIR = os.path.join(WORKSPACE_ROOT, "03_GenDatas", "Dependancies", "PCG_HD", "In", "GZ", "Splines", "splines_export_from_UE_")
CSV_OUTPUT_DIR = os.path.join(WORKSPACE_ROOT, "03_GenDatas", "Dependancies", "PCG_HD", "Out", "CSV")
# Directory for sidewalks and roads FBX files
SWR_FBX_OUTPUT_DIR = os.path.join(WORKSPACE_ROOT, "03_GenDatas", "Dependancies", "SW_Roads", "Out", "Mod")
# Create necessary directories
for directory in [SPLINES_OUTPUT_DIR, CSV_OUTPUT_DIR, SWR_FBX_OUTPUT_DIR]:
    os.makedirs(directory, exist_ok=True)
# ======================================================================
# Script Runner Function
# ======================================================================
def run_script(script_name, function_name, **kwargs):
    """
    Run a function from a script file with the given arguments
    Args:
        script_name (str): Name of the script file (e.g., "000_export_splines_as_json.py")
        function_name (str): Name of the function to call
        **kwargs: Arguments to pass to the function
    """
    try:
        # Load the script as a standalone module, registered under its stem
        # name so intra-script relative lookups keep working.
        script_path = os.path.join(SCRIPTS_DIR, script_name)
        module_name = os.path.splitext(script_name)[0]
        spec = importlib.util.spec_from_file_location(module_name, script_path)
        module = importlib.util.module_from_spec(spec)
        sys.modules[module_name] = module
        spec.loader.exec_module(module)

        # Guard clause: bail out when the requested entry point is missing.
        if not hasattr(module, function_name):
            unreal.log_error(f"Function '{function_name}' not found in {script_name}")
            return None

        func = getattr(module, function_name)
        unreal.log(f"Running {function_name} from {script_name} with args: {kwargs}")
        result = func(**kwargs)
        unreal.log(f"Result: {result}")
        return result
    except Exception as e:
        unreal.log_error(f"Error running {script_name}: {str(e)}")
        return None
# ======================================================================
# Script Functions - Uncomment the ones you want to run
# ======================================================================
# Export splines to JSON
#result = run_script("000_export_splines_as_json.py", "export_splines_to_json",
# iteration_number=ITERATION_NUMBER,
# output_dir=SPLINES_OUTPUT_DIR)
#unreal.log(f"Splines export result: {result}")
# Export GenZone meshes
#result = run_script("010_export_gz_to_mod.py", "main",
# iteration_number=ITERATION_NUMBER)
#unreal.log(f"GenZone export result: {result}")
# Run Houdini PCG generation
def run_houdini_headless(iteration_number, houdini_install_path, hip_file_path, file1_path=None, fbx_folder_path=None, transforms_json_path=None, base_path=None, switch_bool=None):
    """Run Houdini (hython) in headless mode to generate PCG data.

    Builds the hython command line for the PCG .hip file, launches it with
    subprocess.Popen and waits up to ~30s; on timeout the cook keeps running
    in the background and a 'running' status dict is still returned.

    :param iteration_number: int. suffix used for per-iteration file names
    :param houdini_install_path: str. Houdini install root (contains bin/hython.exe)
    :param hip_file_path: str. .hip scene to cook
    :param file1_path: str|None. legacy single-FBX input path
    :param fbx_folder_path: str|None. folder of FBX inputs (new approach)
    :param transforms_json_path: str|None. transforms.json path; derived from
        fbx_folder_path when omitted
    :param base_path: str|None. NOTE(review): accepted (and supplied by the
        caller) but never read here -- the splines base path is recomputed
        locally; confirm whether it should feed --base_path instead.
    :param switch_bool: int|None. forwarded as --switch_bool; defaults to SWITCH_BOOL
    :return: dict with process id, CSV output paths and status, or None on failure
    """
    # Use global SWITCH_BOOL if switch_bool is not provided
    if switch_bool is None:
        switch_bool = SWITCH_BOOL
    # NOTE(review): the string below is a no-op expression, not the function's
    # docstring (it is not the first statement in the body).
    """Run Houdini in headless mode to generate PCG data"""
    try:
        unreal.log(f"Starting Houdini headless process with iteration number: {iteration_number}")
        unreal.log(f"Houdini install path: {houdini_install_path}")
        unreal.log(f"Hip file path: {hip_file_path}")
        # Check if the hip file exists
        if not os.path.exists(hip_file_path):
            unreal.log_error(f"Houdini .hip file not found at: {hip_file_path}")
            return None
        else:
            unreal.log(f"Houdini .hip file found at: {hip_file_path}")
        # Get the path to the Houdini Python interpreter (hython)
        hython_path = os.path.join(houdini_install_path, "bin", "hython.exe")
        if not os.path.exists(hython_path):
            unreal.log_error(f"Hython not found at: {hython_path}")
            return None
        else:
            unreal.log(f"Hython found at: {hython_path}")
        # Get the path to the headless script
        headless_script = os.path.join(SCRIPTS_DIR, "100_headless_topnet_PCGHD.py")
        if not os.path.exists(headless_script):
            unreal.log_error(f"Headless script not found at: {headless_script}")
            return None
        else:
            unreal.log(f"Headless script found at: {headless_script}")
        # Helper function to normalize paths to use forward slashes
        def normalize_path(path):
            """Convert Windows path to use forward slashes"""
            return path.replace('\\', '/')
        # Define input/output paths with forward slashes for Houdini
        mesh_csv_path = normalize_path(os.path.join(CSV_OUTPUT_DIR, f"mesh_{iteration_number}.csv"))
        mat_csv_path = normalize_path(os.path.join(CSV_OUTPUT_DIR, f"mat_{iteration_number}.csv"))
        # Define the splines base path (without iteration number and file extension)
        splines_base_path = normalize_path(os.path.join(SPLINES_OUTPUT_DIR, "splines_export_from_UE_"))
        # Ensure output directories exist
        os.makedirs(os.path.dirname(mesh_csv_path), exist_ok=True)
        os.makedirs(os.path.dirname(mat_csv_path), exist_ok=True)
        os.makedirs(SPLINES_OUTPUT_DIR, exist_ok=True)
        # Log the normalized paths
        unreal.log(f"Normalized mesh CSV path: {mesh_csv_path}")
        unreal.log(f"Normalized material CSV path: {mat_csv_path}")
        unreal.log(f"Normalized splines base path: {splines_base_path}")
        # Build the command to run Houdini in headless mode
        cmd = [
            hython_path,
            headless_script,
            "--hip", hip_file_path,
            "--topnet", "/project/",
            "--rop_pcg_export1_mesh_path", mesh_csv_path,
            "--rop_pcg_export1_mat_path", mat_csv_path,
            "--iteration_number", str(iteration_number),
            "--switch_bool", str(switch_bool),
            "--base_path", splines_base_path # Add the base_path parameter
            # Remove arguments that aren't recognized by the original script
            # "--ignore_load_warnings"
        ]
        # Add fbx_folder_path to cmd if provided (new approach)
        if fbx_folder_path is not None:
            fbx_folder_path = normalize_path(fbx_folder_path)
            cmd.extend(["--fbx_folder_path", fbx_folder_path])
            unreal.log(f"Adding fbx_folder_path: {fbx_folder_path}")
            # If we're using fbx_folder_path, also add the transforms_json_path
            # This is automatically constructed based on the fbx_folder_path
            if transforms_json_path is None:
                # Construct the transforms.json path from the fbx_folder_path
                transforms_json_path = os.path.join(fbx_folder_path, "transforms.json")
            transforms_json_path = normalize_path(transforms_json_path)
            cmd.extend(["--transforms_json_path", transforms_json_path])
            unreal.log(f"Adding transforms_json_path: {transforms_json_path}")
        # Legacy support for file1_path
        elif file1_path is not None:
            # Use the provided file1_path
            file1_path = normalize_path(file1_path)
            cmd.extend(["--file1_path", file1_path])
            unreal.log(f"Adding legacy file1_path: {file1_path}")
        else:
            # Default
            default_file1_path = normalize_path(FILE1_PATH)
            cmd.extend(["--file1_path", default_file1_path])
            unreal.log(f"Adding default legacy file1_path: {default_file1_path}")
        unreal.log(f"Launching Houdini in headless mode...")
        unreal.log(f"Command: {' '.join(cmd)}")
        # Check if the hip file exists
        # NOTE(review): redundant -- already verified at the top of the function.
        if not os.path.exists(hip_file_path):
            unreal.log_error(f"Houdini .hip file not found at: {hip_file_path}")
            return None
        # Check if the splines directory exists and has files
        splines_dir = os.path.dirname(splines_base_path)
        if not os.path.exists(splines_dir):
            unreal.log_warning(f"Splines directory does not exist: {splines_dir}")
            os.makedirs(splines_dir, exist_ok=True)
        else:
            # Check for the specific spline file we're expecting
            expected_spline_file = f"splines_export_from_UE_{iteration_number}.json"
            spline_files = [f for f in os.listdir(splines_dir) if f.endswith('.json')]
            if expected_spline_file not in spline_files:
                unreal.log_warning(f"Expected spline file not found: {expected_spline_file}")
                unreal.log_warning(f"You may need to run the spline export script first")
        # Run the command without creating a new console, to capture output
        unreal.log("Running Houdini process and capturing output...")
        process = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True
        )
        # Wait a short time and check if process is still running
        time.sleep(2)
        if process.poll() is not None:
            # Process has already terminated
            stdout, stderr = process.communicate()
            unreal.log_error(f"Houdini process terminated immediately with exit code: {process.returncode}")
            unreal.log(f"STDOUT: {stdout}")
            unreal.log_error(f"STDERR: {stderr}")
            return None
        unreal.log(f"Houdini process started with PID: {process.pid}")
        unreal.log(f"Process is running. Waiting for completion...")
        # Wait for process to complete with a timeout
        try:
            stdout, stderr = process.communicate(timeout=30) # 30 second timeout
            unreal.log(f"Houdini process completed with exit code: {process.returncode}")
            unreal.log(f"STDOUT: {stdout}")
            if stderr:
                unreal.log_error(f"STDERR: {stderr}")
        except subprocess.TimeoutExpired:
            unreal.log("Houdini process is still running after timeout. Continuing in background.")
            # Don't kill the process, let it continue running
        return {
            'process_id': process.pid,
            'mesh_csv_path': mesh_csv_path,
            'mat_csv_path': mat_csv_path,
            'status': 'running'
        }
    except Exception as e:
        unreal.log_error(f"Error launching Houdini: {str(e)}")
        return None
# Run Houdini PCG generation
# Executed at import time; result is a status dict (see run_houdini_headless)
# or None on validation/launch failure.
result = run_houdini_headless(
    iteration_number=ITERATION_NUMBER,
    houdini_install_path=HOUDINI_INSTALL_PATH,
    hip_file_path=HIP_FILE_PATH,
    fbx_folder_path=FBX_FOLDER_PATH, # Use the new approach with folder path
    # file1_path=FILE1_PATH, # Legacy approach (commented out)
    base_path=SPLINES_BASEPATH_OUTPUT_DIR,
    switch_bool=SWITCH_BOOL
)
unreal.log(f"Houdini PCG generation result: {result}")
# Reimport datatables
#result = run_script("110_reimport_datatable.py", "reimport_datatables",
# iteration_number=ITERATION_NUMBER,
# csv_dir=CSV_OUTPUT_DIR)
#unreal.log(f"Reimport datatables result: {result}")
# Create PCG graph (uncomment to use)
#result = run_script("120_create_pcg_graph.py", "create_pcg_graph",
# iteration_number=ITERATION_NUMBER,
# template_bp_path=UE_PCG_TEMPLATE_BP_PATH)
#unreal.log(f"Create PCG graph result: {result}")
# Run Houdini sidewalks & roads generation (uncomment to use)
def run_houdini_sidewalks_roads(iteration_number, houdini_install_path, hip_file_path=None, file1_path=None, fbx_folder_path=None, transforms_json_path=None, base_path=None, switch_bool=None):
    """Run Houdini in headless mode to generate sidewalks and roads.

    Launches hython on the sidewalks/roads .hip file, cooks the TOP network
    and waits up to ~30s; on timeout the cook keeps running in the background
    and a 'running' status dict is still returned.

    :param iteration_number: int. suffix used for per-iteration file names
    :param houdini_install_path: str. Houdini install root (contains bin/hython.exe)
    :param hip_file_path: str|None. .hip scene; defaults to SWR_HIP_FILE_PATH
    :param file1_path: str|None. legacy single-FBX input path
    :param fbx_folder_path: str|None. folder of FBX inputs (new approach)
    :param transforms_json_path: str|None. transforms.json path; derived from
        fbx_folder_path when omitted
    :param base_path: str|None. spline JSON base path forwarded as --base_path
    :param switch_bool: int|None. forwarded as --switch_bool; defaults to SWITCH_BOOL
    :return: dict with process id, output FBX paths and status, or None on failure
    """
    # Use global SWITCH_BOOL if switch_bool is not provided
    if switch_bool is None:
        switch_bool = SWITCH_BOOL
    # Use SWR_HIP_FILE_PATH if hip_file_path is not provided
    if hip_file_path is None:
        hip_file_path = SWR_HIP_FILE_PATH
    try:
        unreal.log(f"Starting Houdini sidewalks & roads generation with iteration number: {iteration_number}")
        unreal.log(f"Houdini install path: {houdini_install_path}")
        unreal.log(f"Hip file path: {hip_file_path}")
        # Check if the hip file exists
        if not os.path.exists(hip_file_path):
            unreal.log_error(f"Houdini .hip file not found at: {hip_file_path}")
            return None
        else:
            unreal.log(f"Houdini .hip file found at: {hip_file_path}")
        # Get the path to the Houdini Python interpreter (hython)
        hython_path = os.path.join(houdini_install_path, "bin", "hython.exe")
        if not os.path.exists(hython_path):
            unreal.log_error(f"Hython not found at: {hython_path}")
            return None
        else:
            unreal.log(f"Hython found at: {hython_path}")
        # Get the path to the headless script
        headless_script = os.path.join(SCRIPTS_DIR, "200_headless_topnet_SWR.py")
        if not os.path.exists(headless_script):
            unreal.log_error(f"Headless script not found at: {headless_script}")
            return None
        else:
            unreal.log(f"Headless script found at: {headless_script}")
        # Helper function to normalize paths to use forward slashes
        def normalize_path(path):
            """Convert Windows path to use forward slashes"""
            return path.replace('\\', '/')
        # Define input/output paths with forward slashes for Houdini
        road_fbx_path = normalize_path(os.path.join(SWR_FBX_OUTPUT_DIR, f"road_{iteration_number}.fbx"))
        sidewalks_fbx_path = normalize_path(os.path.join(SWR_FBX_OUTPUT_DIR, f"sidewalks_{iteration_number}.fbx"))
        # Define the splines base path (without iteration number and file extension)
        splines_base_path = normalize_path(os.path.join(SPLINES_OUTPUT_DIR, "splines_export_from_UE_"))
        # Ensure output directories exist
        os.makedirs(os.path.dirname(road_fbx_path), exist_ok=True)
        os.makedirs(os.path.dirname(sidewalks_fbx_path), exist_ok=True)
        # Log the normalized paths
        unreal.log(f"Normalized road FBX path: {road_fbx_path}")
        unreal.log(f"Normalized sidewalks FBX path: {sidewalks_fbx_path}")
        unreal.log(f"Normalized splines base path: {splines_base_path}")
        # Build the command to run Houdini in headless mode.
        # Important: use a single quoted string (run with shell=True below) so the
        # script path containing spaces survives parsing.
        # SECURITY NOTE: shell=True shell-parses every interpolated value; all
        # values here come from module constants, not untrusted input.
        cmd_str = f'"{hython_path}" "{headless_script}" --hip "{hip_file_path}" --topnet "/project/" --rop_fbx_road_path "{road_fbx_path}" --rop_fbx_sidewalks_path "{sidewalks_fbx_path}" --iteration_number {iteration_number} --switch_bool {switch_bool}'
        # Add base_path parameter if provided
        if base_path is not None:
            base_path = normalize_path(base_path)
            cmd_str += f' --base_path "{base_path}"'
            unreal.log(f"Adding base_path: {base_path}")
        # Add fbx_folder_path to cmd if provided (new approach)
        if fbx_folder_path is not None:
            fbx_folder_path = normalize_path(fbx_folder_path)
            cmd_str += f' --fbx_folder_path "{fbx_folder_path}"'
            unreal.log(f"Adding fbx_folder_path: {fbx_folder_path}")
            # If we're using fbx_folder_path, also add the transforms_json_path
            # (constructed from the fbx_folder_path when not supplied).
            if transforms_json_path is None:
                transforms_json_path = os.path.join(fbx_folder_path, "transforms.json")
            transforms_json_path = normalize_path(transforms_json_path)
            cmd_str += f' --transforms_json_path "{transforms_json_path}"'
            unreal.log(f"Adding transforms_json_path: {transforms_json_path}")
        # Legacy support for file1_path
        elif file1_path is not None:
            # Use the provided file1_path
            file1_path = normalize_path(file1_path)
            cmd_str += f' --file1_path "{file1_path}"'
            unreal.log(f"Adding legacy file1_path: {file1_path}")
        else:
            # Default. BUGFIX: normalize like run_houdini_headless does (the raw
            # Windows-style path was previously passed through unchanged).
            file_1_path = normalize_path(FILE1_PATH)
            cmd_str += f' --file1_path "{file_1_path}"'
            unreal.log(f"Adding default legacy file1_path: {file_1_path}")
        # For logging purposes, mirror the executed command as a list.
        # BUGFIX: the final fallback previously referenced an undefined
        # 'splines_json_path' (NameError if reached); the list now mirrors
        # cmd_str exactly, in the same argument order.
        cmd_list = [
            hython_path,
            headless_script,
            "--hip", hip_file_path,
            "--topnet", "/project/",
            "--rop_fbx_road_path", road_fbx_path,
            "--rop_fbx_sidewalks_path", sidewalks_fbx_path,
            "--iteration_number", str(iteration_number),
            "--switch_bool", str(switch_bool)
        ]
        if base_path is not None:
            cmd_list.extend(["--base_path", base_path])
        if fbx_folder_path is not None:
            cmd_list.extend(["--fbx_folder_path", fbx_folder_path])
            # transforms_json_path was resolved in the cmd_str branch above.
            cmd_list.extend(["--transforms_json_path", transforms_json_path])
        elif file1_path is not None:
            cmd_list.extend(["--file1_path", file1_path])
        else:
            cmd_list.extend(["--file1_path", normalize_path(FILE1_PATH)])
        unreal.log(f"Launching Houdini sidewalks & roads generation...")
        unreal.log(f"Command: {' '.join(cmd_list)}")
        # Check if the splines directory exists and has files
        splines_dir = os.path.dirname(splines_base_path)
        if not os.path.exists(splines_dir):
            unreal.log_warning(f"Splines directory does not exist: {splines_dir}")
            os.makedirs(splines_dir, exist_ok=True)
        else:
            # Check for the specific spline file we're expecting
            expected_spline_file = f"splines_export_from_UE_{iteration_number}.json"
            spline_files = [f for f in os.listdir(splines_dir) if f.endswith('.json')]
            if expected_spline_file not in spline_files:
                unreal.log_warning(f"Expected spline file not found: {expected_spline_file}")
                unreal.log_warning(f"You may need to run the spline export script first")
                unreal.log_warning(f"Available spline files: {spline_files}")
        # Run the command - don't create a new console window when running from UE
        unreal.log("Running Houdini process...")
        process = subprocess.Popen(
            cmd_str,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
            shell=True # Required when passing a command string instead of a list
            # No creationflags - this causes issues when running as a subprocess from UE
        )
        # Wait a short time and check if process is still running
        time.sleep(2)
        if process.poll() is not None:
            # Process has already terminated
            stdout, stderr = process.communicate()
            unreal.log_error(f"Houdini process terminated immediately with exit code: {process.returncode}")
            unreal.log(f"STDOUT: {stdout}")
            unreal.log_error(f"STDERR: {stderr}")
            return None
        unreal.log(f"Houdini process started with PID: {process.pid}")
        unreal.log(f"Process is running. Waiting for completion...")
        # Wait for process to complete with a timeout
        try:
            stdout, stderr = process.communicate(timeout=30) # 30 second timeout
            unreal.log(f"Houdini process completed with exit code: {process.returncode}")
            unreal.log(f"STDOUT: {stdout}")
            if stderr:
                unreal.log_error(f"STDERR: {stderr}")
        except subprocess.TimeoutExpired:
            unreal.log("Houdini process is still running after timeout. Continuing in background.")
            # Don't kill the process, let it continue running
        # Check if the output files were created
        road_fbx_exists = os.path.exists(road_fbx_path)
        sidewalks_fbx_exists = os.path.exists(sidewalks_fbx_path)
        if not road_fbx_exists:
            unreal.log_warning(f"Road FBX file was not created: {road_fbx_path}")
        else:
            unreal.log(f"Road FBX file exists at: {road_fbx_path}")
            unreal.log(f"File size: {os.path.getsize(road_fbx_path)} bytes")
        if not sidewalks_fbx_exists:
            unreal.log_warning(f"Sidewalks FBX file was not created: {sidewalks_fbx_path}")
        else:
            unreal.log(f"Sidewalks FBX file exists at: {sidewalks_fbx_path}")
            unreal.log(f"File size: {os.path.getsize(sidewalks_fbx_path)} bytes")
        return {
            'process_id': process.pid,
            'road_fbx_path': road_fbx_path,
            'sidewalks_fbx_path': sidewalks_fbx_path,
            'status': 'running' if process.poll() is None else 'completed',
            'road_fbx_exists': road_fbx_exists,
            'sidewalks_fbx_exists': sidewalks_fbx_exists
        }
    except Exception as e:
        unreal.log_error(f"Error launching Houdini: {str(e)}")
        return None
# Run Houdini sidewalks & roads generation
# Executed at import time; result is a status dict (see
# run_houdini_sidewalks_roads) or None on validation/launch failure.
result = run_houdini_sidewalks_roads(
    iteration_number=ITERATION_NUMBER,
    houdini_install_path=HOUDINI_INSTALL_PATH,
    hip_file_path=SWR_HIP_FILE_PATH,
    fbx_folder_path=FBX_FOLDER_PATH, # Use the new approach with folder path
    # file1_path=FILE1_PATH, # Legacy approach (commented out)
    base_path=SPLINES_BASEPATH_OUTPUT_DIR,
    switch_bool=SWITCH_BOOL
)
unreal.log(f"Houdini sidewalks & roads generation result: {result}")
# Reimport static meshes
# result = run_script("210_reimport_SM.py", "reimport_folder_static_meshes",
#                     iteration_number=ITERATION_NUMBER,
#                     fbx_dir=SWR_FBX_OUTPUT_DIR)
# unreal.log(f"Reimport static meshes result: {result}")
# Add sidewalks & roads to level (uncomment to use)
# result = run_script("220_add_SM_to_lvl.py", "add_SM_sidewalks_and_roads_to_level",
#                     iteration_number=ITERATION_NUMBER)
# unreal.log(f"Add sidewalks & roads result: {result}")
unreal.log("Script execution completed")
|
"""
code for the main script editor window
"""
import ast
import os
import sys
import traceback
from collections import namedtuple
try:
import unreal
RUNNING_IN_UNREAL = True
except:
RUNNING_IN_UNREAL = False
from PySide2 import QtWidgets, QtCore
from PySide2 import QtUiTools
from . import outputTextWidget
from .codeEditor import codeEditor
from .codeEditor.highlighter import pyHighlight
# Module-level singletons populated when the tool window is launched.
APP = None
WINDOW = None
# Tool file layout: the .ui definition, tab config and icons all live next
# to this module.
MODULE_PATH = os.path.dirname(os.path.abspath(__file__))
MODULE_NAME = os.path.basename(MODULE_PATH)
UI_PATH = os.path.join(MODULE_PATH, 'ui', 'script_editor.ui')
CONFIG_PATH = os.path.join(MODULE_PATH, 'config.txt')
ICONS_PATH = os.path.join(MODULE_PATH, 'icons')
# Make "ICONS:<name>" paths resolve to the icons folder in Qt resources.
QtCore.QDir.addSearchPath("ICONS", ICONS_PATH)
class TabConfig(namedtuple('TabConfig', ['index', 'label', 'active', 'command'])):
    """
    Immutable record describing one python script tab in the editor.

    :param index: int. script tab index within the tab widget
    :param label: str. script tab title label
    :param active: bool. whether this tab is set to active (current)
                   only one tab is allowed to be active
    :param command: str. script text held in the tab
    """
    # No per-instance __dict__: keep instances as light as the tuple itself.
    __slots__ = ()
class ScriptEditorWindow(QtWidgets.QMainWindow):
"""
Script Editor main window
"""
def __init__(self, parent=None):
"""
Initialization
"""
super(ScriptEditorWindow, self).__init__(parent)
loader = QtUiTools.QUiLoader()
file = QtCore.QFile(os.path.abspath(UI_PATH))
if file.open(QtCore.QFile.ReadOnly):
self.ui = loader.load(file, parent)
file.close()
self.main_layout.addWidget(self.ui)
self.ui.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
splitter = QtWidgets.QSplitter()
splitter.setOrientation(QtCore.Qt.Vertical)
self.ui.centralwidget.layout().addWidget(splitter)
self.ui_log_edit = outputTextWidget.OutputTextWidget()
splitter.addWidget(self.ui.ui_log_edit)
splitter.addWidget(self.ui.ui_tab_widget)
self.ui_tabs = list()
self.ui_tab_highlighters = list()
self.register_traceback()
self.load_configs()
#
self.ui.ui_run_all_btn.clicked.connect(self.execute)
self.ui.ui_run_sel_btn.clicked.connect(self.execute_sel)
self.ui.ui_clear_log_btn.clicked.connect(self.clear_log)
self.ui.ui_clear_script_btn.clicked.connect(self.clear_script)
self.ui.ui_clear_both_btn.clicked.connect(self.clear_all)
self.ui.ui_save_action.triggered.connect(self.save_script)
self.ui.ui_open_action.triggered.connect(self.open_script)
self.ui.ui_tab_widget.tabBarClicked.connect(self.add_tab)
self.ui.ui_tab_widget.tabCloseRequested.connect(self.remove_tab)
# region Overrides
    def closeEvent(self, event):
        """
        Override: close the tool automatically saves out the script configs
        :param event: QCloseEvent. forwarded to the base implementation
        """
        self.save_configs()
        super(ScriptEditorWindow, self).closeEvent(event)
def register_traceback(self):
"""
Link Unreal traceback
"""
def custom_traceback(exc_type, exc_value, exc_traceback=None):
message = 'Error: {}: {}\n'.format(exc_type, exc_value)
if exc_traceback:
format_exception = traceback.format_tb(exc_traceback)
for line in format_exception:
message += line
self.ui_log_edit.update_logger(message, 'error')
sys.excepthook = custom_traceback
# endregion
# region Config
def save_configs(self):
"""
Save all current python tabs' config
"""
configs = list()
active_index = self.ui.ui_tab_widget.currentIndex()
for i in range(self.ui.ui_tab_widget.count()-1):
self.ui.ui_tab_widget.setCurrentIndex(i)
script_tab = self.ui.ui_tab_widget.currentWidget()
label = self.ui.ui_tab_widget.tabText(i)
active = active_index == i
config = TabConfig(i, label, active, script_tab.toPlainText())
configs.append(config)
# go back to the previous active tab
self.ui.ui_tab_widget.setCurrentIndex(active_index)
with open(CONFIG_PATH, 'w') as f:
string = [config._asdict() for config in configs]
f.write(str(string))
def load_configs(self):
"""
During startup, load python script config file and initialize tab gui
"""
if not os.path.exists(CONFIG_PATH):
self.load_tabs()
return
with open(CONFIG_PATH, 'r') as f:
tab_configs = list()
tab_config_dicts = ast.literal_eval(f.read())
for tab_config_dict in tab_config_dicts:
tab_config = TabConfig(**tab_config_dict)
tab_configs.append(tab_config)
self.load_tabs(tab_configs)
def load_tabs(self, tab_configs=None):
"""
Initialize python script tab gui from config object
:param tab_configs: TabConfig. dataclass object storing python tab info
"""
if not tab_configs:
tab_configs = [TabConfig(0, 'Python', True, '')]
active_index = 0
for tab_config in tab_configs:
self.insert_tab(tab_config.index, tab_config.command, tab_config.label)
if tab_config.active:
active_index = tab_config.index
self.ui.ui_tab_widget.setCurrentIndex(active_index)
def insert_tab(self, index, command, label):
"""
Insert a python tab into the tab widget
:param index: int. tab index to insert
:param command: str. python script command to add to the inserted tab
:param label: str. title/label of the tab inserted
"""
script_edit = codeEditor.CodeEditor()
script_edit.setPlainText(command)
highlight = pyHighlight.PythonHighlighter(
script_edit.document())
self.ui.ui_tab_widget.insertTab(index, script_edit, label)
self.ui_tab_highlighters.append(highlight)
self.ui_tabs.append(script_edit)
self.ui.ui_tab_widget.setCurrentIndex(index)
# endregion
# region Execution
def execute(self):
"""
Send all command in script area for mya to execute
"""
command = self.ui.ui_tab_widget.currentWidget().toPlainText()
if RUNNING_IN_UNREAL:
output = unreal.PythonScriptLibrary.execute_python_command_ex(
python_command=command,
execution_mode=unreal.PythonCommandExecutionMode.EXECUTE_FILE,
file_execution_scope=unreal.PythonFileExecutionScope.PUBLIC
)
if not output:
return
self.ui_log_edit.update_logger(
"# Command executed: \n"
"{}\n"
"# Command execution ended".format(command)
)
self.send_formatted_output(output)
else:
# todo this wont get any output, fix it
output = None
exec(command)
def execute_sel(self):
"""
Send selected command in script area for mya to execute
"""
command = self.ui.ui_tab_widget.currentWidget().textCursor().selection().toPlainText()
if RUNNING_IN_UNREAL:
output = unreal.PythonScriptLibrary.execute_python_command_ex(
python_command=command,
execution_mode=unreal.PythonCommandExecutionMode.EXECUTE_FILE,
file_execution_scope=unreal.PythonFileExecutionScope.PUBLIC
)
if not output:
return
self.ui_log_edit.update_logger(
"# Command executed: \n"
"{}\n"
"# Command execution ended".format(command)
)
self.send_formatted_output(output)
else:
# todo this wont get any output, fix it
output = None
exec(command)
def send_formatted_output(self, output):
"""
Update ui field with messages
"""
if not output:
return
result, log_entries = output
for entry in log_entries:
if entry.type != unreal.PythonLogOutputType.INFO:
self.ui_log_edit.update_logger(entry.output, 'error')
else:
self.ui_log_edit.update_logger(entry.output, 'info')
def clear_log(self):
"""
Clear history logging area
"""
self.ui_log_edit.clear()
def clear_script(self):
self.ui.ui_tab_widget.currentWidget().setPlainText('')
def clear_all(self):
self.clear_script()
self.clear_log()
# endregion
# region Tab Operation
def add_tab(self, index):
"""
Add a python tab when 'Add' tab button is clicked
"""
if index == self.ui.ui_tab_widget.count() - 1:
self.insert_tab(index, '', 'Python')
def remove_tab(self, index):
"""
Remove a python tab
:param index: int. removal tab index
"""
msg_box = QtWidgets.QMessageBox(
QtWidgets.QMessageBox.Question,
'',
'Delete the Current Tab?',
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
)
usr_choice = msg_box.exec()
if usr_choice == QtWidgets.QMessageBox.Yes:
if index != self.ui.ui_tab_widget.count() - 1:
self.ui.ui_tab_widget.removeTab(index)
self.ui.ui_tab_widget.setCurrentIndex(index-1)
# endregion
# region File IO
def open_script(self):
"""
Open python file to script edit area
"""
path = QtWidgets.QFileDialog.getOpenFileName(
None,
"Open Script",
MODULE_PATH,
filter="*.py")[0]
if not path:
return
with open(path, 'r') as f:
file_name = os.path.basename(path)
output = f.read()
index = self.ui.ui_tab_widget.count() - 1
self.insert_tab(index, output, file_name)
def save_script(self):
"""
Save script edit area as a python file
"""
path = QtWidgets.QFileDialog.getSaveFileName(
None,
"Save Script As...",
MODULE_PATH,
filter="*.py")[0]
if not path:
return
command = self.ui.ui_tab_widget.currentWidget().toPlainText()
with open(path, 'w') as f:
f.write(command)
# endregion
def show():
    """Display the Script Editor window, reusing module-level singletons.

    Creates the QApplication and the window on first use, applies the
    Unreal stylesheet, and (inside Unreal) parents the Qt window to Slate.
    Returns the ScriptEditorWindow instance.
    """
    global APP
    global WINDOW
    existing_app = QtWidgets.QApplication.instance()
    APP = existing_app if existing_app else QtWidgets.QApplication(sys.argv)
    import unreal_stylesheet
    unreal_stylesheet.setup()
    # handles existing instance
    if not WINDOW:
        WINDOW = ScriptEditorWindow()
    WINDOW.show()
    if RUNNING_IN_UNREAL:
        # Dock the external Qt window under Unreal's Slate main window.
        unreal.parent_external_window_to_slate(int(WINDOW.winId()))
    return WINDOW
|
import unreal
from Utilities.Utils import Singleton
class AModalWindow(metaclass=Singleton):
    """Singleton Chameleon tool wrapper that adds/removes placeholder buttons."""

    def __init__(self, json_path: str):
        # Path of the Chameleon JSON layout this tool instance is bound to.
        self.json_path = json_path
        # Live UI handle used for all widget mutations below.
        self.data: unreal.ChameleonData = unreal.PythonBPLib.get_chameleon_data(self.json_path)
        # Name of the container slot that receives the dynamic buttons.
        self.ui_buttons_room = "SomePlaceForButtons"
        # How many buttons add_button() has appended so far.
        self.button_count = 0

    def add_button(self):
        """Append one placeholder SButton to the buttons container."""
        print("add_button call.")
        layout_json = """
        {
            "SButton": {
                "Text": "PlaceHolder Button"
                , "ContentPadding": [-8, 0]
                , "VAlign": "Center"
                , "HAlign": "Center"
            }
        }
        """
        self.data.append_slot_from_json(self.ui_buttons_room, layout_json)
        self.button_count += 1

    def remove_button(self):
        """Remove the first widget in the buttons container."""
        print("remove_button call.")
        self.data.remove_widget_at(self.ui_buttons_room, 0)
        self.button_count -= 1
|
import unreal
def log(message):
    """Echo *message* to both the Unreal log and stdout with a tool prefix."""
    tagged = f"[Actor List] {message}"
    unreal.log(tagged)
    print(tagged)
def list_light_properties(light_obj):
    """Collect "name: value" strings for every public, non-callable attribute.

    Attributes whose access or string formatting raises are silently skipped.
    """
    collected = []
    for attr_name in dir(light_obj):
        if attr_name.startswith('_'):
            continue  # skip private/internal names
        try:
            attr_value = getattr(light_obj, attr_name)
            if not callable(attr_value):  # methods are not interesting here
                collected.append(f"{attr_name}: {attr_value}")
        except Exception:
            continue
    return collected
def list_all_lights():
    """Log details for the SpotLight actors in the current editor world.

    Only the first spot light found is analyzed in depth (component
    properties and intensity access).
    """
    log("Getting all lights in the scene...")
    try:
        # NOTE(review): EditorLevelLibrary is deprecated in UE5 in favour of
        # UnrealEditorSubsystem.get_editor_world() — still works, may warn.
        world = unreal.EditorLevelLibrary.get_editor_world()
        all_lights = unreal.GameplayStatics.get_all_actors_of_class(world, unreal.SpotLight)
        log(f"Found {len(all_lights)} spot lights")
        log("=== Listing all spot lights ===")
        # Take only the first light for detailed analysis.
        if all_lights:
            light = all_lights[0]
            light_name = light.get_name()
            log(f"Analyzing light: '{light_name}'")
            # Fetch the actor's light component.
            try:
                light_component = light.light_component
                if light_component:
                    comp_name = light_component.get_name()
                    log(f"Component name: '{comp_name}'")
                    # Print every public, non-callable property of the component.
                    log("Component properties:")
                    properties = list_light_properties(light_component)
                    for prop in properties:
                        log(f" {prop}")
                    # Try to read the intensity value in different ways.
                    try:
                        intensity = light_component.intensity
                        log(f"Direct intensity access: {intensity}")
                    except Exception as e:
                        log(f"Error accessing intensity directly: {str(e)}")
                    try:
                        intensity = getattr(light_component, "intensity", None)
                        log(f"Intensity via getattr: {intensity}")
                    except Exception as e:
                        log(f"Error accessing intensity via getattr: {str(e)}")
            except Exception as e:
                log(f"Error analyzing light component: {str(e)}")
        log("=== End of analysis ===")
    except Exception as e:
        log(f"Error: {str(e)}")
# Entry point: only run when executed as a script, not on import.
if __name__ == '__main__':
    list_all_lights()
|
# -*- coding: utf-8 -*-
import unreal
import sys
import pydoc
import inspect
import os
from enum import IntFlag
class Singleton(type):
    """Metaclass caching exactly one instance per class.

    A class declared with ``metaclass=Singleton`` returns the same instance
    from every constructor call; the extra classmethods query the cache.
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # Construct and memoize the instance on first call only.
        if cls not in cls._instances:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
        return cls._instances[cls]

    def has_instance(cls):
        """Return True when an instance has already been created for cls."""
        return cls in cls._instances

    def get_instance(cls):
        """Return the cached instance for cls, or None when none exists yet."""
        return cls._instances.get(cls)
def cast(object_to_cast, object_class):
    """Attempt ``object_class.cast(object_to_cast)``; return None on failure.

    Mirrors unreal's cast semantics: a class without a ``cast`` attribute,
    or a cast that raises, yields None instead of propagating.
    """
    try:
        return object_class.cast(object_to_cast)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return None
# short cut for print dir
# short cut for print dir
def d(obj, subString=''):
    """Print dir(obj), optionally filtered by a case-insensitive substring."""
    needle = subString.lower()
    for entry in dir(obj):
        if not needle or needle in entry.lower():
            print(entry)
def l(obj, subString='', bPrint = True):
    '''
    Print detailed information about an object: builtin callables, other
    callables, plain attributes and Unreal "Editor Properties", formatted
    into aligned log lines.
    :param obj: an Object instance or a class
    :param subString: case-insensitive filter substring for member names
    :param bPrint: when True, print the formatted report to stdout
    :return: (otherCallables, builtinCallables, properties, editorProperties)
             lists of [name, flags, description, result] entries, or None for
             None/module inputs
    '''
    def _simplifyDoc(content):
        # Extract the short doc text and the "(...)" parameter list from a
        # builtin's __doc__ string.
        bracketS, bracketE = content.find('('), content.find(')')
        arrow = content.find('->')
        funcDocPos = len(content)
        endSign = ['--', '\n', '\r']
        for s in endSign:
            # Cut the doc at the earliest end marker.
            p = content.find(s)
            if p != -1 and p < funcDocPos:
                funcDocPos = p
        funcDoc = content[:funcDocPos]
        param = content[bracketS + 1: bracketE].strip()
        return funcDoc, param
    def _getEditorProperties(content, obj):
        # Parse the "**Editor Properties:**" section of an unreal.Object's
        # __doc__ into (name, type, rw-flags, description) tuples.
        lines = content.split('\r')
        signFound = False
        allInfoFound = False
        result = []
        for line in lines:
            if not signFound and '**Editor Properties:**' in line:
                signFound = True
            if signFound:
                # TODO: replace the manual find() parsing with a regex
                nameS, nameE = line.find('``') + 2, line.find('`` ')
                if nameS == -1 or nameE == -1:
                    continue
                typeS, typeE = line.find('(') + 1, line.find(')')
                if typeS == -1 or typeE == -1:
                    continue
                rwS, rwE = line.find('[') + 1, line.find(']')
                if rwS == -1 or rwE == -1:
                    continue
                name = line[nameS: nameE]
                type = line[typeS: typeE]
                rws = line[rwS: rwE]
                descript = line[rwE + 2:]
                allInfoFound = True
                result.append((name, type, rws, descript))
        if signFound:
            if not allInfoFound:
                unreal.log_warning("not all info found {}".format(obj))
        else:
            unreal.log_warning("can't find editor properties in {}".format(obj))
        return result
    if obj == None:
        unreal.log_warning("obj == None")
        return None
    if inspect.ismodule(obj):
        # Modules are not supported by this inspector.
        return None
    ignoreList = ['__delattr__', '__getattribute__', '__hash__', '__init__', '__setattr__']
    propertiesNames = []
    builtinCallableNames = []
    otherCallableNames = []
    # Bucket every matching member: builtin callable / other callable / attribute.
    for x in dir(obj):
        if subString == '' or subString in x.lower():
            attr = getattr(obj, x)
            if callable(attr):
                if inspect.isbuiltin(attr):  # or inspect.isfunction(attr) or inspect.ismethod(attr):
                    builtinCallableNames.append(x)
                else:
                    # Not Built-in
                    otherCallableNames.append(x)
            else:
                # Properties
                propertiesNames.append(x)
    # 1 otherCallables
    otherCallables = []
    for name in otherCallableNames:
        descriptionStr = ""
        if name == "__doc__":
            resultStr = "ignored.."  # __doc__ is too long; do not print it
        else:
            resultStr = "{}".format(getattr(obj, name))
        otherCallables.append([name, (), descriptionStr, resultStr])
    # 2 builtinCallables
    builtinCallables = []
    for name in builtinCallableNames:
        attr = getattr(obj, name)
        descriptionStr = ""
        resultStr = ""
        bHasParameter = False
        if hasattr(attr, '__doc__'):
            docForDisplay, paramStr = _simplifyDoc(attr.__doc__)
            if paramStr == '':
                # Method with No params
                descriptionStr = docForDisplay[docForDisplay.find(')') + 1:]
                if '-> None' not in docForDisplay:
                    # Call no-arg methods so the result can be shown inline.
                    resultStr = "{}".format(attr.__call__())
                else:
                    resultStr = 'skip call'
            else:
                # Method that takes parameters; do not call it.
                descriptionStr = paramStr
                bHasParameter = True
                resultStr = ""
        else:
            pass
        builtinCallables.append([name, (bHasParameter,), descriptionStr, resultStr])
    # 3 properties
    editorPropertiesInfos = []
    editorPropertiesNames = []
    if hasattr(obj, '__doc__') and isinstance(obj, unreal.Object):
        editorPropertiesInfos = _getEditorProperties(obj.__doc__, obj)
        for name, _, _, _ in editorPropertiesInfos:
            editorPropertiesNames.append(name)
    properties = []
    for name in propertiesNames:
        descriptionStr = ""
        if name == "__doc__":
            resultStr = "ignored.."  # __doc__ is too long; do not print it
        else:
            try:
                resultStr = "{}".format(getattr(obj, name))
            except:
                resultStr = ""
        isAlsoEditorProperty = name in editorPropertiesNames  # both an Editor Property and a plain attribute
        properties.append([name, (isAlsoEditorProperty,), descriptionStr, resultStr])
    # 4 editorProperties
    editorProperties = []
    propertyAlsoEditorPropertyCount = 0
    for info in editorPropertiesInfos:
        name, type, rw, descriptionStr = info
        if subString == '' or subString in name.lower():  # filter out entries that were not requested
            try:
                value = eval('obj.get_editor_property("{}")'.format(name))
            except:
                value = ""
            descriptionStr = "[{}]".format(rw)
            resultStr = "{}".format(value)
            isAlsoProperty = name in propertiesNames
            if isAlsoProperty:
                propertyAlsoEditorPropertyCount += 1
            editorProperties.append( [name, (isAlsoProperty,), descriptionStr, resultStr])
    # Assemble the aligned report lines.
    strs = []
    strs.append("Detail: {}".format(obj))
    formatWidth = 70
    for info in otherCallables:
        name, flags, descriptionStr, resultStr = info
        # line = "\t{} {}{}{}".format(name, descriptionStr, " " *(formatWidth -1 - len(name) - len(descriptionStr)), resultStr)
        line = "\t{} {}".format(name, descriptionStr)
        line += "{}{}".format(" " * (formatWidth-len(line)+1-4), resultStr)
        strs.append(line)
    for info in builtinCallables:
        name, flags, descriptionStr, resultStr = info
        if flags[0]:  # method that takes parameters
            # line = "\t{}({}) {} {}".format(name, descriptionStr, " " * (formatWidth - 5 - len(name) - len(descriptionStr)), resultStr)
            line = "\t{}({})".format(name, descriptionStr)
            line += "{}{}".format(" " * (formatWidth-len(line)+1-4), resultStr)
        else:
            # line = "\t{}() {} |{}| {}".format(name, descriptionStr, "-" * (formatWidth - 7 - len(name) - len(descriptionStr)), resultStr)
            line = "\t{}() {}".format(name, descriptionStr)
            line += "|{}| {}".format("-" * (formatWidth-len(line)+1-4-3), resultStr)
        strs.append(line)
    for info in properties:
        name, flags, descriptionStr, resultStr = info
        sign = "**" if flags[0] else ""
        # line = "\t\t{} {} {}{}{}".format(name, sign, descriptionStr, " " * (formatWidth - 6 - len(name) -len(sign) - len(descriptionStr)), resultStr)
        line = "\t\t{} {} {}".format(name, sign, descriptionStr)
        line += "{}{}".format(" " * (formatWidth-len(line)+2-8), resultStr)
        strs.append(line)
    strs.append("Special Editor Properties:")
    for info in editorProperties:
        name, flags, descriptionStr, resultStr = info
        if flags[0]:
            pass  # already printed in the properties section above; skip
        else:
            sign = "*"
            # line = "\t\t{0} {1} {3}{4} {2}".format(name, sign, descriptionStr, " " * (formatWidth - 3 - len(name) -len(sign) ), resultStr)
            line = "\t\t{} {}".format(name, sign)
            line += "{}{} {}".format(" " * (formatWidth-len(line)+2-8), resultStr, descriptionStr)  # descriptionStr holds the [rw] flags; shown last
            strs.append(line)
    if bPrint:
        for l in strs:  # note: rebinds the module-level name `l` locally
            print(l)
        print("'*':Editor Property, '**':Editor Property also object attribute.")
        print("{}: matched, builtinCallable: {} otherCallables: {} prop: {} EditorProps: {} both: {}".format(obj
            , len(builtinCallables), len(otherCallables), len(properties), len(editorProperties), propertyAlsoEditorPropertyCount))
    return otherCallables, builtinCallables, properties, editorProperties
# short cut for print type
# short cut for print type
def t(obj):
    """Print the type of obj."""
    obj_type = type(obj)
    print(obj_type)
# unreal type to Python dict
# unreal type to Python dict
def ToJson(v):
    """Convert supported unreal math types (Transform/Vector/Quat) to dicts.

    Unsupported types print an error message and yield None.
    """
    tp = type(v)
    if tp == unreal.Vector:
        return {'x': v.x, 'y': v.y, 'z': v.z}
    if tp == unreal.Quat:
        return {'x': v.x, 'y': v.y, 'z': v.z, 'w': v.w}
    if tp == unreal.Transform:
        # Recurse into the transform's components.
        return {'translation': ToJson(v.translation), 'rotation': ToJson(v.rotation), 'scale3d': ToJson(v.scale3d)}
    print("Error type: " + str(tp) + " not implemented.")
    return None
def get_selected_comps():
    """Return all components currently selected in the editor."""
    return unreal.PythonBPLib.get_selected_components()
def get_selected_comp():
    """Return the first selected component, or None when nothing is selected."""
    selected = unreal.PythonBPLib.get_selected_components()
    if len(selected) > 0:
        return selected[0]
    return None
def get_selected_asset():
    """Load and return the first asset selected in the Content Browser.

    Returns None when nothing is selected.
    """
    selected = unreal.PythonBPLib.get_selected_assets_paths()
    if selected:
        # Reuse the already-fetched list instead of querying the selection a
        # second time: the original re-called get_selected_assets_paths(),
        # which is redundant and racy if the selection changes in between.
        return unreal.load_asset(selected[0])
    return None
def get_selected_assets():
    """Load and return every asset currently selected in the Content Browser.

    Paths that fail to load (load_asset returns None) are skipped.
    """
    assets = []
    for path in unreal.PythonBPLib.get_selected_assets_paths():
        asset = unreal.load_asset(path)
        # Identity comparison instead of `!= None` (PEP 8).
        if asset is not None:
            assets.append(asset)
    return assets
def get_selected_actors():
    """Return all actors currently selected in the level editor."""
    return unreal.get_editor_subsystem(unreal.EditorActorSubsystem).get_selected_level_actors()
def get_selected_actor():
    """Return the first selected level actor, or None when none is selected."""
    selected = unreal.get_editor_subsystem(unreal.EditorActorSubsystem).get_selected_level_actors()
    if len(selected) > 0:
        return selected[0]
    return None
def set_preview_es31():
    """Switch the editor preview platform to Android GLES 3.1."""
    unreal.PythonBPLib.set_preview_platform("GLSL_ES3_1_ANDROID", "ES3_1")
def set_preview_sm5():
    """Reset the editor preview platform back to default Shader Model 5."""
    unreal.PythonBPLib.set_preview_platform("", "SM5")
# todo: create export tools for create help/dir to file
def export_dir(filepath, cls):
    """Write sorted(dir(cls)) to *filepath*, one name per line.

    Fix: the original redirected sys.stdout globally and restored it without
    try/finally — an error while printing left the interpreter's stdout
    pointing at a (possibly leaked, unclosed) file. Write to the file
    directly instead; the output is byte-identical.
    """
    with open(filepath, 'w') as f:
        for name in sorted(dir(cls)):
            f.write(name + '\n')
def export_help(filepath, cls):
    """Write ``pydoc.help(cls)`` output to *filepath*.

    Fixes over the original:
    - sys.stdout is restored in a ``finally`` block, so a pydoc failure can
      no longer leave stdout redirected into the file;
    - the *previous* stdout is restored rather than ``sys.__stdout__`` —
      inside Unreal, sys.stdout is the engine's log redirector, which the
      original clobbered;
    - the file handle is closed via ``with`` even on error.
    """
    with open(filepath, 'w') as f:
        previous_stdout = sys.stdout
        sys.stdout = f  # pydoc.help resolves sys.stdout at call time
        try:
            pydoc.help(cls)
        finally:
            sys.stdout = previous_stdout
# Modify the default Encoding configured in python's site.py
def set_default_encoding(encodingStr):
    """Patch site.py so setencoding() installs *encodingStr* as the default.

    Performs a naive textual search/replace on site.py: enables the
    disabled ``if 0`` branch inside ``setencoding()`` and swaps the encoding
    string literal. Reports success/failure via Unreal dialogs/notifications.
    :param encodingStr: encoding name to install, e.g. "utf-8"
    """
    pythonPath = os.path.dirname(sys.path[0])
    if not os.path.exists(pythonPath):
        unreal.PythonBPLib.message_dialog("can't find python folder: {}".format(pythonPath), "Warning")
        return
    sitePyPath = pythonPath + "/project/.py"
    if not os.path.exists(sitePyPath):
        unreal.PythonBPLib.message_dialog("can't find site.py: {}".format(sitePyPath), "Warning")
        return
    # Simple string search and replace.
    with open(sitePyPath, "r") as f:
        lines = f.readlines()
        startLine = -1
        endLine = -1
        # Locate the body of setencoding(): from its `def` to the next `def `.
        for i in range(len(lines)):
            if startLine == -1 and lines[i][:len('def setencoding():')] == 'def setencoding():':
                startLine = i
                continue
            if endLine == -1 and startLine > -1 and lines[i].startswith('def '):
                endLine = i
        print("startLine: {} endLine: {}".format(startLine, endLine))
        changedLineCount = 0
        if -1 < startLine and startLine < endLine:
            # Collect the `if ` lines inside the function; exactly 4 expected.
            linePosWithIf = []
            for i in range(startLine + 1, endLine):
                if lines[i].lstrip().startswith('if '):
                    linePosWithIf.append(i)
                    print(lines[i])
            if len(linePosWithIf) != 4:
                unreal.PythonBPLib.message_dialog("Find pos failed: {}".format(sitePyPath), "Warning")
                print(linePosWithIf)
                return
            lines[linePosWithIf[2]] = lines[linePosWithIf[2]].replace("if 0", "if 1")  # simply rewrite the content of the third `if` line
            changedLineCount += 1
            # Replace the encoding string literal inside the enabled branch.
            for i in range(linePosWithIf[2] + 1, linePosWithIf[3]):
                line = lines[i]
                if "encoding=" in line.replace(" ", ""):
                    s = line.find('"')
                    e = line.find('"', s+1)
                    if s > 0 and e > s:
                        lines[i] = line[:s+1] + encodingStr + line[e:]
                        changedLineCount += 1
                    break
            if changedLineCount == 2:
                with open(sitePyPath, 'w') as f:
                    f.writelines(lines)
                unreal.PythonBPLib.notification("Success: {}".format(sitePyPath), 0)
                currentEncoding = sys.getdefaultencoding()
                if currentEncoding == encodingStr:
                    unreal.PythonBPLib.notification("ๅทฒๅฐdefault encoding่ฎพ็ฝฎไธบ{}".format(currentEncoding), 0)
                else:
                    unreal.PythonBPLib.message_dialog("ๅทฒๅฐdefault encoding่ฎพ็ฝฎไธบ{}๏ผ้่ฆ้ๅฏ็ผ่พๅจไปฅไพฟ็ๆ".format(encodingStr), "Warning")
            else:
                unreal.PythonBPLib.message_dialog("Find content failed: {}".format(sitePyPath), "Warning")
def get_actors_at_location(location, error_tolerance):
    """Return all level actors whose location is within *error_tolerance* of *location*.

    Bug fix: the original called get_selected_level_actors() (despite naming
    the result ``allActors``), so it could only ever match actors that were
    already selected — making select_actors_at_location() useless for finding
    overlapping actors. Scan every actor in the level instead.
    """
    allActors = unreal.get_editor_subsystem(unreal.EditorActorSubsystem).get_all_level_actors()
    result = [_actor for _actor in allActors if _actor.get_actor_location().is_near_equal(location, error_tolerance)]
    return result
def select_actors_at_location(location, error_tolerance, actorTypes=None):
    """Select the actors near *location*, optionally filtered by exact type.

    Returns the matched (and selected) actors, or [] when fewer than two
    actors share the location.
    """
    matched = get_actors_at_location(location, error_tolerance)
    # Guard clause: nothing (or only one actor) at that spot.
    if len(matched) <= 1:
        print("None actor with the same locations.")
        return []
    print("Total {} actor(s) with the same locations.".format(len(matched)))
    if actorTypes is not None:
        # Exact type match (not isinstance), as in the original.
        matched = [candidate for candidate in matched if type(candidate) in actorTypes]
    unreal.get_editor_subsystem(unreal.EditorActorSubsystem).set_selected_level_actors(matched)
    return matched
def select_actors_with_same_location(actor, error_tolerance):
    """Select static/skeletal mesh actors sharing *actor*'s location.

    Returns [] (after printing a notice) when actor is None.
    """
    if actor is None:
        print("actor is None.")
        return []
    mesh_types = [unreal.StaticMeshActor, unreal.SkeletalMeshActor]
    return select_actors_at_location(actor.get_actor_location(), error_tolerance, mesh_types)
def get_chameleon_tool_instance(json_name):
    """Find the unique chameleon tool instance whose jsonPath ends with *json_name*.

    Returns the instance when exactly one matches; None when none match or
    the name is ambiguous (a warning is logged for multiple matches).

    Bug fix: the original iterated ``globals()`` directly, which yields the
    *names* (strings) of the globals — ``hasattr(var, "jsonPath")`` was never
    true, so the function always returned None. Iterate the values instead.
    """
    found_count = 0
    result = None
    for var in globals().values():
        if hasattr(var, "jsonPath") and hasattr(var, "data"):
            if isinstance(var.data, unreal.ChameleonData):
                if var.jsonPath.endswith(json_name):
                    found_count += 1
                    result = var
    if found_count == 1:
        return result
    if found_count > 1:
        unreal.log_warning(f"Found Multi-ToolsInstance by name: {json_name}, count: {found_count}")
    return None
#
# Flags describing an object instance
#
class EObjectFlags(IntFlag):
    """Mirror of UE's EObjectFlags bitmask (ObjectMacros.h).

    Fix: every value originally carried a trailing comma copied from the C++
    enum, which made each declared value a 1-tuple instead of a plain int
    (Enum then passed it through int(...)); the commas are removed so the
    declarations are genuine ints.
    """
    # Do not add new flags unless they truly belong here. There are alternatives.
    # if you change any the bit of any of the RF_Load flags, then you will need legacy serialization
    RF_NoFlags = 0x00000000  #< No flags, used to avoid a cast
    # This first group of flags mostly has to do with what kind of object it is. Other than transient, these are the persistent object flags.
    # The garbage collector also tends to look at these.
    RF_Public = 0x00000001  #< Object is visible outside its package.
    RF_Standalone = 0x00000002  #< Keep object around for editing even if unreferenced.
    RF_MarkAsNative = 0x00000004  #< Object (UField) will be marked as native on construction (DO NOT USE THIS FLAG in HasAnyFlags() etc)
    RF_Transactional = 0x00000008  #< Object is transactional.
    RF_ClassDefaultObject = 0x00000010  #< This object is its class's default object
    RF_ArchetypeObject = 0x00000020  #< This object is a template for another object - treat like a class default object
    RF_Transient = 0x00000040  #< Don't save object.
    # This group of flags is primarily concerned with garbage collection.
    RF_MarkAsRootSet = 0x00000080  #< Object will be marked as root set on construction and not be garbage collected, even if unreferenced (DO NOT USE THIS FLAG in HasAnyFlags() etc)
    RF_TagGarbageTemp = 0x00000100  #< This is a temp user flag for various utilities that need to use the garbage collector. The garbage collector itself does not interpret it.
    # The group of flags tracks the stages of the lifetime of a uobject
    RF_NeedInitialization = 0x00000200  #< This object has not completed its initialization process. Cleared when ~FObjectInitializer completes
    RF_NeedLoad = 0x00000400  #< During load, indicates object needs loading.
    RF_KeepForCooker = 0x00000800  #< Keep this object during garbage collection because it's still being used by the cooker
    RF_NeedPostLoad = 0x00001000  #< Object needs to be postloaded.
    RF_NeedPostLoadSubobjects = 0x00002000  #< During load, indicates that the object still needs to instance subobjects and fixup serialized component references
    RF_NewerVersionExists = 0x00004000  #< Object has been consigned to oblivion due to its owner package being reloaded, and a newer version currently exists
    RF_BeginDestroyed = 0x00008000  #< BeginDestroy has been called on the object.
    RF_FinishDestroyed = 0x00010000  #< FinishDestroy has been called on the object.
    # Misc. Flags
    RF_BeingRegenerated = 0x00020000  #< Flagged on UObjects that are used to create UClasses (e.g. Blueprints) while they are regenerating their UClass on load (See FLinkerLoad::CreateExport()), as well as UClass objects in the midst of being created
    RF_DefaultSubObject = 0x00040000  #< Flagged on subobjects that are defaults
    RF_WasLoaded = 0x00080000  #< Flagged on UObjects that were loaded
    RF_TextExportTransient = 0x00100000  #< Do not export object to text form (e.g. copy/paste). Generally used for sub-objects that can be regenerated from data in their parent object.
    RF_LoadCompleted = 0x00200000  #< Object has been completely serialized by linkerload at least once. DO NOT USE THIS FLAG, It should be replaced with RF_WasLoaded.
    RF_InheritableComponentTemplate = 0x00400000  #< Archetype of the object can be in its super class
    RF_DuplicateTransient = 0x00800000  #< Object should not be included in any type of duplication (copy/paste, binary duplication, etc.)
    RF_StrongRefOnFrame = 0x01000000  #< References to this object from persistent function frame are handled as strong ones.
    RF_NonPIEDuplicateTransient = 0x02000000  #< Object should not be included for duplication unless it's being duplicated for a PIE session
    RF_Dynamic = 0x04000000  #< Field Only. Dynamic field. UE_DEPRECATED(5.0, "RF_Dynamic should no longer be used. It is no longer being set by engine code.") - doesn't get constructed during static initialization, can be constructed multiple times # @todo: BP2CPP_remove
    RF_WillBeLoaded = 0x08000000  #< This object was constructed during load and will be loaded shortly
    RF_HasExternalPackage = 0x10000000  #< This object has an external package assigned and should look it up when getting the outermost package
    # RF_Garbage and RF_PendingKill are mirrored in EInternalObjectFlags because checking the internal flags is much faster for the Garbage Collector
    # while checking the object flags is much faster outside of it where the Object pointer is already available and most likely cached.
    # RF_PendingKill is mirrored in EInternalObjectFlags because checking the internal flags is much faster for the Garbage Collector
    # while checking the object flags is much faster outside of it where the Object pointer is already available and most likely cached.
    RF_PendingKill = 0x20000000  #< Objects that are pending destruction (invalid for gameplay but valid objects). UE_DEPRECATED(5.0, "RF_PendingKill should not be used directly. Make sure references to objects are released using one of the existing engine callbacks or use weak object pointers.") This flag is mirrored in EInternalObjectFlags as PendingKill for performance
    RF_Garbage = 0x40000000  #< Garbage from logical point of view and should not be referenced. UE_DEPRECATED(5.0, "RF_Garbage should not be used directly. Use MarkAsGarbage and ClearGarbage instead.") This flag is mirrored in EInternalObjectFlags as Garbage for performance
    RF_AllocatedInSharedPage = 0x80000000  #< Allocated from a ref-counted page shared with other UObjects
class EMaterialValueType(IntFlag):
    """Mirror of UE's EMaterialValueType bitmask.

    Fix: trailing commas copied from the C++ enum made each declared value a
    1-tuple instead of a plain int; the commas are removed.
    """
    MCT_Float1 = 1
    MCT_Float2 = 2
    MCT_Float3 = 4
    MCT_Float4 = 8
    MCT_Texture2D = 1 << 4
    MCT_TextureCube = 1 << 5
    MCT_Texture2DArray = 1 << 6
    MCT_TextureCubeArray = 1 << 7
    MCT_VolumeTexture = 1 << 8
    MCT_StaticBool = 1 << 9
    MCT_Unknown = 1 << 10
    MCT_MaterialAttributes = 1 << 11
    MCT_TextureExternal = 1 << 12
    MCT_TextureVirtual = 1 << 13
    MCT_VTPageTableResult = 1 << 14
    MCT_ShadingModel = 1 << 15
    MCT_Strata = 1 << 16
    MCT_LWCScalar = 1 << 17
    MCT_LWCVector2 = 1 << 18
    MCT_LWCVector3 = 1 << 19
    MCT_LWCVector4 = 1 << 20
    MCT_Execution = 1 << 21
    MCT_VoidStatement = 1 << 22
|
import unreal
# --- ์ธ๋ถ ์ฃผ์
๋ณ์ ์๋ด ---
# ๋ธ๋ฃจํ๋ฆฐํธ ๋๋ ๋ค๋ฅธ ์คํฌ๋ฆฝํธ์์ ์ด ์คํฌ๋ฆฝํธ๋ฅผ ์คํํ๊ธฐ ์ ์ ์๋ ๋ณ์๋ค์ ๋ฐ๋์ ์ ์ํด์ผ ํฉ๋๋ค.
# ์์:
#
# master_path = "/project/"
# parameters_str = "emissive_color_mult:0.1; Tint:1,0,0,1"
# do_reparent = True
# do_parameter_edit = True
def parse_parameters_string(params_string):
    """
    Parse a semicolon-separated material parameter string.
    - scalar:  param_name:0.5
    - vector:  param_name:1,0,0,1  (R,G,B) or (R,G,B,A)
    - texture: param_name:/project/.MyTexture
    Example: "scalar_param:0.5; vector_param:1,0,0,1; texture_param:/project/"
    :param params_string: raw parameter string (semicolon-separated pairs)
    :return: list of {'name', 'type', 'value'} dicts; invalid pairs are
             skipped with a logged warning
    """
    unreal.log("ํ๋ผ๋ฏธํฐ ๋ฌธ์์ด ํ์ฑ ์์...")
    parsed_params = []
    if not params_string or not isinstance(params_string, str):
        return parsed_params
    asset_lib = unreal.EditorAssetLibrary
    # Split the parameter pairs on semicolons, dropping empty fragments.
    pairs = [pair.strip() for pair in params_string.split(';') if pair.strip()]
    for pair in pairs:
        if ':' not in pair:
            unreal.log_warning(f" - ํ๋ผ๋ฏธํฐ ํ์ฑ ๊ฒฝ๊ณ : ์๋ชป๋ ํ์์ ์์ ๊ฑด๋๋๋๋ค -> '{pair}'")
            continue
        key, value_str = pair.split(':', 1)
        key = key.strip()
        value_str = value_str.strip()
        if not key or not value_str:
            unreal.log_warning(f" - ํ๋ผ๋ฏธํฐ ํ์ฑ ๊ฒฝ๊ณ : ๋น์ด์๋ ํค ๋๋ ๊ฐ์ ๊ฑด๋๋๋๋ค -> '{pair}'")
            continue
        # Asset-path value -> texture parameter.
        if value_str.startswith('/Game/') or value_str.startswith('/Engine/'):
            texture_asset = asset_lib.load_asset(value_str)
            if isinstance(texture_asset, unreal.Texture):
                parsed_params.append({'name': key, 'type': 'texture', 'value': texture_asset})
                unreal.log(f" - ํ
์ค์ฒ ํ๋ผ๋ฏธํฐ ๋ฐ๊ฒฌ: '{key}' -> '{value_str}'")
            else:
                unreal.log_warning(f" - ํ๋ผ๋ฏธํฐ ํ์ฑ ๊ฒฝ๊ณ : ํ
์ค์ฒ๋ฅผ ์ฐพ์ ์ ์๊ฑฐ๋ ์ ํจํ์ง ์์ต๋๋ค -> '{value_str}'")
        # Comma-separated numbers -> vector (LinearColor) parameter.
        elif ',' in value_str:
            try:
                color_parts = [float(c.strip()) for c in value_str.split(',')]
                if len(color_parts) == 3:
                    # RGB given; default alpha to 1.0.
                    color = unreal.LinearColor(color_parts[0], color_parts[1], color_parts[2], 1.0)
                    parsed_params.append({'name': key, 'type': 'vector', 'value': color})
                    unreal.log(f" - ๋ฒกํฐ ํ๋ผ๋ฏธํฐ ๋ฐ๊ฒฌ: '{key}' -> {color}")
                elif len(color_parts) == 4:
                    color = unreal.LinearColor(color_parts[0], color_parts[1], color_parts[2], color_parts[3])
                    parsed_params.append({'name': key, 'type': 'vector', 'value': color})
                    unreal.log(f" - ๋ฒกํฐ ํ๋ผ๋ฏธํฐ ๋ฐ๊ฒฌ: '{key}' -> {color}")
                else:
                    unreal.log_warning(f" - ํ๋ผ๋ฏธํฐ ํ์ฑ ๊ฒฝ๊ณ : ๋ฒกํฐ ๊ฐ์ 3๊ฐ ๋๋ 4๊ฐ์ ์ซ์์ฌ์ผ ํฉ๋๋ค -> '{value_str}'")
            except ValueError:
                unreal.log_warning(f" - ํ๋ผ๋ฏธํฐ ํ์ฑ ๊ฒฝ๊ณ : ๋ฒกํฐ์ ์ผ๋ถ๋ฅผ ์ซ์๋ก ๋ณํํ  ์ ์์ต๋๋ค -> '{value_str}'")
        # Anything else -> scalar (float) parameter.
        else:
            try:
                scalar_value = float(value_str)
                parsed_params.append({'name': key, 'type': 'scalar', 'value': scalar_value})
                unreal.log(f" - ์ค์นผ๋ผ ํ๋ผ๋ฏธํฐ ๋ฐ๊ฒฌ: '{key}' -> {scalar_value}")
            except ValueError:
                unreal.log_warning(f" - ํ๋ผ๋ฏธํฐ ํ์ฑ ๊ฒฝ๊ณ : ์ค์นผ๋ผ ๊ฐ์ ์ซ์๋ก ๋ณํํ  ์ ์์ต๋๋ค -> '{value_str}'")
    return parsed_params
def reprocess_selected_materials():
    """
    Replace the parent of the material instances selected in the Content
    Browser, edit their parameters, then save them.

    Relies on externally injected globals (see the note at the top of the
    script): master_path, parameters_str, do_reparent, do_parameter_edit.
    """
    unreal.log("์ ํ๋ ๋จธํฐ๋ฆฌ์ผ ์ฌ์ฒ๋ฆฌ ๋ก์ง์ ์์ํฉ๋๋ค.")
    # --- Rebind the externally injected globals to locals ---
    NEW_PARENT_MATERIAL_PATH = master_path
    PARAMETERS_TO_SET = parse_parameters_string(parameters_str)
    DO_REPARENT = do_reparent
    DO_PARAMETER_EDIT = do_parameter_edit
    asset_lib = unreal.EditorAssetLibrary
    util_lib = unreal.EditorUtilityLibrary
    material_lib = unreal.MaterialEditingLibrary
    # 1. Load the new parent material and validate it.
    new_parent_material = None
    if DO_REPARENT:
        new_parent_material = asset_lib.load_asset(NEW_PARENT_MATERIAL_PATH)
        if not new_parent_material or not isinstance(new_parent_material, unreal.Material):
            unreal.log_error(f"์ค๋ฅ: ์ ๋ง์คํฐ ๋จธํฐ๋ฆฌ์ผ์ ์ฐพ์ ์ ์๊ฑฐ๋ ์ ํจํ์ง ์์ต๋๋ค: {NEW_PARENT_MATERIAL_PATH}")
            return
    # 2. Fetch the assets currently selected in the Content Browser.
    selected_assets = util_lib.get_selected_assets()
    if not selected_assets:
        unreal.log_warning("์ค๋ฅ: ์ฝํ
์ธ ๋ธ๋ผ์ฐ์ ์์ ๋จธํฐ๋ฆฌ์ผ ์ธ์คํด์ค๋ฅผ ๋จผ์ ์ ํํด์ฃผ์ธ์.")
        return
    # 3. Keep only the material-instance assets among the selection.
    material_instances_to_process = [asset for asset in selected_assets if isinstance(asset, unreal.MaterialInstanceConstant)]
    if not material_instances_to_process:
        unreal.log_warning("์ค๋ฅ: ์ ํ๋ ์์
์ค์ ์ฒ๋ฆฌํ  ๋จธํฐ๋ฆฌ์ผ ์ธ์คํด์ค๊ฐ ์์ต๋๋ค.")
        return
    unreal.log(f"์ด {len(material_instances_to_process)}๊ฐ์ ๋จธํฐ๋ฆฌ์ผ ์ธ์คํด์ค๋ฅผ ์ฒ๋ฆฌํฉ๋๋ค.")
    # 4. Process each material instance inside one undoable transaction.
    processed_count = 0
    with unreal.ScopedEditorTransaction("Reprocess Selected Materials") as transaction:
        for mat_instance in material_instances_to_process:
            instance_name = mat_instance.get_name()
            mat_path = mat_instance.get_path_name()
            unreal.log(f"--- '{instance_name}' ({mat_path}) ์ฒ๋ฆฌ ์์ ---")
            mat_to_edit = mat_instance
            was_modified = False
            # 4-1. Swap the parent material.
            if DO_REPARENT:
                mat_instance.modify()
                if mat_instance.get_editor_property('parent') != new_parent_material:
                    unreal.log(f" > ๋ถ๋ชจ ๋ณ๊ฒฝ ์๋: '{new_parent_material.get_name()}'")
                    material_lib.set_material_instance_parent(mat_instance, new_parent_material)
                    # Verify that the reparent actually stuck before saving.
                    if mat_instance.get_editor_property('parent') == new_parent_material:
                        unreal.log(" + ํ์ธ: ๋ถ๋ชจ ๋ณ๊ฒฝ์ด ์ฑ๊ณต์ ์ผ๋ก ์ ์ฉ๋์์ต๋๋ค.")
                        unreal.log(" > ๋ถ๋ชจ ๋ณ๊ฒฝ์ฌํญ์ ๋์คํฌ์ ์ ์ฅํ๊ณ ์์
์ ๋ค์ ๋ก๋ํฉ๋๋ค...")
                        asset_lib.save_loaded_asset(mat_instance)
                        mat_to_edit = asset_lib.load_asset(mat_path)
                        was_modified = True
                    else:
                        unreal.log_warning(f" ์คํจ: '{instance_name}'์ ๋ถ๋ชจ ๋ณ๊ฒฝ์ ์คํจํ์ต๋๋ค. ๋ค์ ์์
์ผ๋ก ๋์ด๊ฐ๋๋ค.")
                        continue
                else:
                    unreal.log(" > ๋ถ๋ชจ๊ฐ ์ด๋ฏธ ๋ชฉํ ๋จธํฐ๋ฆฌ์ผ์ด๋ฏ๋ก ๋ณ๊ฒฝ์ ๊ฑด๋๋๋๋ค.")
            # 4-2. Edit parameter values.
            if DO_PARAMETER_EDIT and PARAMETERS_TO_SET:
                mat_to_edit.modify()
                unreal.log(f" > '{mat_to_edit.get_name()}'์ ํ๋ผ๋ฏธํฐ ๊ฐ ์์ ์ ์์ํฉ๋๋ค.")
                for param_info in PARAMETERS_TO_SET:
                    param_name = param_info['name']
                    param_type = param_info['type']
                    param_value = param_info['value']
                    success = False
                    # Dispatch on the parsed parameter type.
                    if param_type == 'scalar':
                        success = material_lib.set_material_instance_scalar_parameter_value(mat_to_edit, param_name, param_value)
                    elif param_type == 'vector':
                        success = material_lib.set_material_instance_vector_parameter_value(mat_to_edit, param_name, param_value)
                    elif param_type == 'texture':
                        success = material_lib.set_material_instance_texture_parameter_value(mat_to_edit, param_name, param_value)
                    if success:
                        unreal.log(f" - '{param_type}' ํ๋ผ๋ฏธํฐ '{param_name}' ๊ฐ์ ์ฑ๊ณต์ ์ผ๋ก ์ค์ ํ์ต๋๋ค.")
                        was_modified = True
                    else:
                        unreal.log(f" - ์ ๋ณด: '{param_name}' ํ๋ผ๋ฏธํฐ๊ฐ ์๊ฑฐ๋ ํ์
์ด ์ผ์นํ์ง ์์ ๊ฑด๋๋๋๋ค.")
            # 4-3. Final save.
            if was_modified:
                unreal.log(f" > ๋ณ๊ฒฝ์ฌํญ์ ์ ์ฅํฉ๋๋ค.")
                asset_lib.save_loaded_asset(mat_to_edit)
            else:
                unreal.log(f" > ๋ณ๊ฒฝ์ฌํญ์ด ์์ด ์ ์ฅ์ ๊ฑด๋๋๋๋ค.")
            processed_count += 1
    if processed_count == 0:
        unreal.log_warning("์ฒ๋ฆฌ๋ ๋จธํฐ๋ฆฌ์ผ์ด ์์ด ์์
์ ์ข
๋ฃํฉ๋๋ค.")
    else:
        unreal.log(f"์ด {processed_count}๊ฐ์ ๋จธํฐ๋ฆฌ์ผ ์ฌ์ฒ๋ฆฌ๊ฐ ์๋ฃ๋์์ต๋๋ค.")
# Run the batch reprocess immediately when this script is executed.
reprocess_selected_materials()
|
import sys
import unreal
import json
from pathlib import Path
from importlib import reload, import_module

BASE_PATH = Path(r'/project/-scripts')
sys.path.append(str(BASE_PATH))

# Project modules force-reloaded so in-editor edits are picked up without
# restarting Unreal.
MODULES_TO_RELOAD = ('ue_utils', 'stats', 'consts', 'clean_numbers', 'jsonutils', 'colors')
for module_name in MODULES_TO_RELOAD:
    # Import first so the module object exists, then force a reload.
    # (Original shadowed the loop variable with the module object.)
    reload(import_module(module_name))

from jsonutils import save_as_json

# Collect every non-default property value of the selected assets and dump
# the result as JSON keyed by asset path.
data = {}
assets = unreal.EditorUtilityLibrary().get_selected_assets()
for asset in assets:
    # NOTE(review): wc_get_all_nondefault_property_values appears to yield
    # (name, value) pairs -- confirm against the project's helper library.
    changes = asset.wc_get_all_nondefault_property_values()
    if not changes:
        continue
    change_data = {}
    for key, value in changes:
        if isinstance(value, (int, str, float, bool)):
            change_data[key] = value
        else:
            # Non-primitive values are not JSON-serializable here; report them.
            print(f'Unknown type for {key}: {type(value)}')
    data[asset.get_path_name()] = change_data

save_as_json(data, BASE_PATH / 'data' / 'difficulty.json', pretty=True)
|
# Copyright Epic Games, Inc. All Rights Reserved.
import os
import unreal
#-------------------------------------------------------------------------------
class Prompt(object):
    """Mixin that fills prompt context fields (TITLE, UE_BRANCH, UE_PROJECT)
    from an Unreal engine context, falling back to git when unavailable.

    NOTE(review): relies on ``super().prompt(context)``, ``self.get_noticeboard``
    and ``self.Noticeboard`` -- it must be mixed into a class providing these;
    confirm against the surrounding prompt framework.
    """
    def prompt(self, context):
        # Prefer Unreal branch/project info; fall back to git on failure.
        try:
            self._get_ue_branch(context)
        except EnvironmentError:
            self._get_git_branch(context)
        context.TITLE = context.UE_BRANCH + " | " + context.UE_PROJECT
        super().prompt(context)
    def _get_ue_branch(self, context):
        # Resolve branch and project names via the unreal.Context API.
        try:
            session = self.get_noticeboard(self.Noticeboard.SESSION)
            ue_context = unreal.Context(session["uproject"] or ".")
            branch_full = ue_context.get_engine().get_info().get("BranchName", "UnknownBranch")
            branch_name = ue_context.get_name()
            if project := ue_context.get_project():
                project_name = project.get_name()
                if ue_context.get_type() == unreal.ContextType.FOREIGN:
                    # Foreign project: display the engine directory's parent
                    # as the branch name and tag the project with "(F)".
                    branch_name = ue_context.get_engine().get_dir().parent.name
                    project_name += "(F)"
            else:
                project_name = "-"
        except EnvironmentError:
            # Propagate so prompt() can fall back to the git branch.
            raise
        except Exception as e:
            # Any other failure is surfaced through the displayed fields.
            branch_full = "Error"
            branch_name = "Error"
            project_name = str(e) + "\nError"
        context.UE_BRANCH = branch_name
        context.UE_PROJECT = project_name
        context.UE_BRANCH_FULL = branch_full
    def _get_git_branch(self, context):
        from pathlib import Path
        # Appending "x" makes .parents include the cwd itself in the walk up.
        for git_root in (Path(os.getcwd()) / "x").parents:
            if (git_root / ".git").is_dir():
                break
        else:
            # No .git directory found anywhere above the cwd.
            context.UE_BRANCH = "NoBranch"
            context.UE_PROJECT = "NoProject"
            return
        context.UE_BRANCH = git_root.stem
        context.UE_PROJECT = "nobranch"
        with (git_root / ".git/HEAD").open("rt") as file:
            for line in file.readlines():
                line = line.strip()
                if line.startswith("ref: "):
                    # Symbolic ref: use the branch name after the last "/".
                    context.UE_PROJECT = line[5:].split("/")[-1]
                else:
                    # Detached HEAD: show the first 6 chars of the commit sha.
                    context.UE_PROJECT = line[:6]
                    break
|
import unreal
from ueGear.controlrig.paths import CONTROL_RIG_FUNCTION_PATH
from ueGear.controlrig.components import base_component, EPIC_control_01, EPIC_leg_01
from ueGear.controlrig.helpers import controls
class Component(EPIC_leg_01.Component):
    """ueGear control-rig component that maps mGear's ``EPIC_leg_02`` onto
    the ``EPIC_leg_01`` build logic; all behavior is inherited unchanged.
    """
    name = "test_Leg"
    mgear_component = "EPIC_leg_02"
    def __init__(self):
        # No extra state; defers entirely to EPIC_leg_01.Component.
        super().__init__()
class ManualComponent(EPIC_leg_01.ManualComponent):
    """Manual-build counterpart for ``EPIC_leg_02``; inherits the
    ``EPIC_leg_01`` manual component behavior unchanged.
    """
    name = "EPIC_leg_02"
    mgear_component = "EPIC_leg_02"
    def __init__(self):
        # No extra state; defers entirely to EPIC_leg_01.ManualComponent.
        super().__init__()
|
# -*- coding: utf-8 -*-
"""Load textures from PNG."""
from ayon_core.pipeline import AYON_CONTAINER_ID
from ayon_unreal.api import plugin
from ayon_unreal.api.pipeline import (
create_container,
imprint,
format_asset_directory
)
import unreal # noqa
class TexturePNGLoader(plugin.Loader):
    """Load an Unreal 2D texture from an image file (PNG/JPG/TIFF/EXR).

    Imports the representation via Unreal's Interchange pipeline into a
    dedicated asset directory and creates an AYON container next to it so
    the product can later be updated or removed.
    """

    product_types = {"image", "texture", "render"}
    label = "Import image texture 2d"
    representations = {"*"}
    extensions = {"png", "jpg", "tiff", "exr"}
    icon = "wallpaper"
    color = "orange"

    # Defined by settings
    show_dialog = False
    loaded_asset_dir = "{folder[path]}/{product[name]}_{version[version]}"
    loaded_asset_name = "{folder[name]}_{product[name]}_{version[version]}_{representation[name]}" # noqa

    @classmethod
    def apply_settings(cls, project_settings):
        """Override the class defaults from the project's Unreal settings."""
        super(TexturePNGLoader, cls).apply_settings(project_settings)
        unreal_settings = project_settings.get("unreal", {})
        # Apply import settings
        import_settings = unreal_settings.get("import_settings", {})
        cls.show_dialog = import_settings.get("show_dialog", cls.show_dialog)
        cls.loaded_asset_dir = import_settings.get(
            "loaded_asset_dir", cls.loaded_asset_dir)
        cls.loaded_asset_name = import_settings.get(
            "loaded_asset_name", cls.loaded_asset_name)

    @classmethod
    def get_task(cls, filename, asset_dir, asset_name, replace):
        """Build an AssetImportTask importing *filename* as *asset_name*.

        Args:
            filename (str): Source file on disk.
            asset_dir (str): Content-browser destination path.
            asset_name (str): Destination asset name.
            replace (bool): Whether to overwrite an existing asset.

        Returns:
            unreal.AssetImportTask: the configured (unsubmitted) task.
        """
        task = unreal.AssetImportTask()
        task.set_editor_property('filename', filename)
        task.set_editor_property('destination_path', asset_dir)
        task.set_editor_property('destination_name', asset_name)
        task.set_editor_property('replace_existing', replace)
        # Suppress the interactive import dialog unless explicitly enabled.
        task.set_editor_property('automated', bool(not cls.show_dialog))
        task.set_editor_property('save', True)
        # set import options here
        return task

    @classmethod
    def import_and_containerize(
        cls, filepath, asset_dir, container_name
    ):
        """Import *filepath* into *asset_dir* and ensure an AYON container.

        Bug fix: this classmethod's first parameter was named ``self``;
        renamed to ``cls`` to match the decorator (callers are unaffected
        since the argument is bound implicitly).

        Returns:
            str: the (unchanged) asset directory.
        """
        if not unreal.EditorAssetLibrary.does_directory_exist(asset_dir):
            unreal.EditorAssetLibrary.make_directory(asset_dir)
        unreal.log("Import using interchange method")
        # Enable the Interchange importer feature flag for every supported
        # image format (deduplicates four identical console-command calls).
        for fmt in ("PNG", "JPG", "TIFF", "EXR"):
            unreal.SystemLibrary.execute_console_command(
                None, f"Interchange.FeatureFlags.Import.{fmt} 1")
        import_asset_parameters = unreal.ImportAssetParameters()
        import_asset_parameters.is_automated = bool(not cls.show_dialog)
        source_data = unreal.InterchangeManager.create_source_data(filepath)
        interchange_manager = unreal.InterchangeManager.get_interchange_manager_scripted()  # noqa
        interchange_manager.import_asset(
            asset_dir, source_data, import_asset_parameters
        )
        if not unreal.EditorAssetLibrary.does_asset_exist(
                f"{asset_dir}/{container_name}"):
            # Create Asset Container
            create_container(container=container_name, path=asset_dir)
        return asset_dir

    def imprint(
        self,
        folder_path,
        asset_dir,
        container_name,
        asset_name,
        repre_entity,
        product_type,
        project_name
    ):
        """Write AYON container metadata onto the container asset.

        NOTE: this method shadows the module-level ``imprint`` helper and
        delegates to it (the bare name resolves to the module global here).
        """
        data = {
            "schema": "ayon:container-2.0",
            "id": AYON_CONTAINER_ID,
            "namespace": asset_dir,
            "folder_path": folder_path,
            "container_name": container_name,
            "asset_name": asset_name,
            "loader": str(self.__class__.__name__),
            "representation": repre_entity["id"],
            "parent": repre_entity["versionId"],
            "product_type": product_type,
            # TODO these should probably be removed
            "asset": folder_path,
            "family": product_type,
            "project_name": project_name
        }
        imprint(f"{asset_dir}/{container_name}", data)

    def load(self, context, name, namespace, options):
        """Load and containerise representation into Content Browser.
        Args:
            context (dict): application context
            name (str): Product name
            namespace (str): in Unreal this is basically path to container.
                             This is not passed here, so namespace is set
                             by `containerise()` because only then we know
                             real path.
            options (dict): Those would be data to be imprinted.
        Returns:
            list(str): list of container content
        """
        # Create directory for asset and Ayon container
        folder_path = context["folder"]["path"]
        suffix = "_CON"
        path = self.filepath_from_context(context)
        asset_root, asset_name = format_asset_directory(
            context, self.loaded_asset_dir, self.loaded_asset_name
        )
        tools = unreal.AssetToolsHelpers().get_asset_tools()
        # Unique directory + name; the "_CON" suffix marks the container.
        asset_dir, container_name = tools.create_unique_asset_name(
            asset_root, suffix="")
        container_name += suffix
        asset_dir = self.import_and_containerize(
            path, asset_dir, container_name
        )
        self.imprint(
            folder_path,
            asset_dir,
            container_name,
            asset_name,
            context["representation"],
            context["product"]["productType"],
            context["project"]["name"],
        )
        # Persist everything that was imported under the new directory.
        asset_contents = unreal.EditorAssetLibrary.list_assets(
            asset_dir, recursive=True, include_folder=True
        )
        for unreal_asset in asset_contents:
            unreal.EditorAssetLibrary.save_asset(unreal_asset)
        return asset_contents

    def update(self, container, context):
        """Re-import the product for a new version into a fresh directory."""
        folder_path = context["folder"]["path"]
        product_type = context["product"]["productType"]
        repre_entity = context["representation"]
        path = self.filepath_from_context(context)
        # Create directory for asset and Ayon container
        suffix = "_CON"
        asset_root, asset_name = format_asset_directory(
            context, self.loaded_asset_dir, self.loaded_asset_name
        )
        tools = unreal.AssetToolsHelpers().get_asset_tools()
        asset_dir, container_name = tools.create_unique_asset_name(
            asset_root, suffix="")
        container_name += suffix
        asset_dir = self.import_and_containerize(
            path, asset_dir, container_name
        )
        self.imprint(
            folder_path,
            asset_dir,
            container_name,
            asset_name,
            repre_entity,
            product_type,
            context["project"]["name"]
        )
        asset_contents = unreal.EditorAssetLibrary.list_assets(
            asset_dir, recursive=True, include_folder=False
        )
        for unreal_asset in asset_contents:
            unreal.EditorAssetLibrary.save_asset(unreal_asset)

    def remove(self, container):
        """Delete the whole container directory from the Content Browser."""
        path = container["namespace"]
        if unreal.EditorAssetLibrary.does_directory_exist(path):
            unreal.EditorAssetLibrary.delete_directory(path)
|
#!/project/ python3
"""
Unit tests for socket snapping functionality
Run this in Unreal Engine's Python console to test socket operations
"""
# import math
from typing import Optional, Tuple
import unreal
class SocketSnappingUnitTest:
    """Unit tests for socket snapping mathematics and operations.

    Sockets cannot be added to meshes at runtime, so socket positions are
    simulated via actor tags and transformed by hand.

    Bug fix: several ``print`` string literals were mis-encoded and split
    across physical lines (broken syntax); the emoji messages are restored.
    """

    def __init__(self):
        self.test_results = []  # list of (test_name, passed) tuples
        self.test_actors = []   # actors spawned by tests; destroyed in cleanup()

    def cleanup(self):
        """Clean up test actors."""
        for actor in self.test_actors:
            if actor and actor.is_valid():
                actor.destroy_actor()
        self.test_actors.clear()

    def assert_equal(self, actual, expected, tolerance=0.01, message=""):
        """Assert scalars or same-length sequences are equal within tolerance."""
        if isinstance(actual, (list, tuple)) and isinstance(expected, (list, tuple)):
            if len(actual) != len(expected):
                raise AssertionError(f"{message}: Length mismatch {len(actual)} != {len(expected)}")
            for a, e in zip(actual, expected, strict=True):
                if abs(a - e) > tolerance:
                    raise AssertionError(f"{message}: {actual} != {expected}")
        else:
            if abs(actual - expected) > tolerance:
                raise AssertionError(f"{message}: {actual} != {expected}")

    def create_test_actor_with_socket(
        self,
        name: str,
        location: Tuple[float, float, float],
        socket_name: str,
        socket_offset: Tuple[float, float, float],
    ) -> unreal.Actor:
        """Create a test actor with a simulated socket position."""
        # Use a basic cube mesh
        cube_mesh = unreal.EditorAssetLibrary.load_asset("/project/")
        if not cube_mesh:
            raise RuntimeError("Could not load cube mesh")
        # Spawn actor
        actor = unreal.EditorLevelLibrary.spawn_actor_from_object(
            cube_mesh, unreal.Vector(*location), unreal.Rotator(0, 0, 0)
        )
        if not actor:
            raise RuntimeError(f"Failed to spawn actor {name}")
        actor.set_actor_label(name)
        self.test_actors.append(actor)
        # Store socket information as tags (since we can't add real sockets at runtime)
        actor.tags = [
            f"socket:{socket_name}",
            f"socket_offset:{socket_offset[0]},{socket_offset[1]},{socket_offset[2]}",
        ]
        return actor

    def get_simulated_socket_transform(self, actor: unreal.Actor, socket_name: str) -> Optional[unreal.Transform]:
        """Get simulated socket transform from actor tags, or None if absent."""
        socket_offset = None
        for tag in actor.tags:
            if tag.startswith(f"socket:{socket_name}"):
                # Found socket
                pass
            elif tag.startswith("socket_offset:"):
                parts = tag.split(":")[1].split(",")
                socket_offset = unreal.Vector(float(parts[0]), float(parts[1]), float(parts[2]))
        if socket_offset is None:
            return None
        # Calculate world transform: the local offset rotates/scales with the actor.
        actor_transform = actor.get_actor_transform()
        socket_transform = unreal.Transform()
        socket_transform.location = actor_transform.transform_position(socket_offset)
        socket_transform.rotation = actor_transform.rotation
        socket_transform.scale3d = actor_transform.scale3d
        return socket_transform

    def test_basic_socket_math(self):
        """Test basic socket transformation mathematics."""
        print("Test 1: Basic Socket Math")
        # Create actor at origin
        actor = self.create_test_actor_with_socket(
            "TestActor_Math", location=(0, 0, 0), socket_name="TestSocket", socket_offset=(100, 0, 50)
        )
        # Get socket world position
        socket_transform = self.get_simulated_socket_transform(actor, "TestSocket")
        # Verify socket is at expected world position
        expected_location = [100, 0, 50]
        actual_location = [socket_transform.location.x, socket_transform.location.y, socket_transform.location.z]
        self.assert_equal(actual_location, expected_location, tolerance=0.01, message="Socket world position")
        print("✅ Basic socket math test passed")
        self.test_results.append(("Basic Socket Math", True))

    def test_rotated_socket_math(self):
        """Test socket transformation with rotation."""
        print("Test 2: Rotated Socket Math")
        # Create actor rotated 90 degrees
        actor = self.create_test_actor_with_socket(
            "TestActor_Rotated",
            location=(1000, 0, 0),
            socket_name="TestSocket",
            socket_offset=(100, 0, 0),  # Socket 100 units forward
        )
        # Rotate actor 90 degrees (yaw)
        actor.set_actor_rotation(unreal.Rotator(0, 0, 90))
        # Get socket world position
        socket_transform = self.get_simulated_socket_transform(actor, "TestSocket")
        # After 90-degree rotation, forward (100, 0, 0) becomes right (0, 100, 0)
        expected_location = [1000, 100, 0]  # Actor at 1000,0,0 + rotated offset
        actual_location = [socket_transform.location.x, socket_transform.location.y, socket_transform.location.z]
        self.assert_equal(actual_location, expected_location, tolerance=1.0, message="Rotated socket world position")
        print("✅ Rotated socket math test passed")
        self.test_results.append(("Rotated Socket Math", True))

    def test_socket_to_socket_alignment(self):
        """Test aligning two actors via their sockets."""
        print("Test 3: Socket-to-Socket Alignment")
        # Create target actor with socket
        target = self.create_test_actor_with_socket(
            "Target_Actor",
            location=(2000, 0, 0),
            socket_name="ConnectSocket",
            socket_offset=(150, 0, 0),  # Socket on right side
        )
        # Create source actor with socket
        source = self.create_test_actor_with_socket(
            "Source_Actor",
            location=(2500, 500, 0),  # Start elsewhere
            socket_name="AttachSocket",
            socket_offset=(-150, 0, 0),  # Socket on left side
        )
        # Calculate where source needs to be to align sockets
        target_socket_world = self.get_simulated_socket_transform(target, "ConnectSocket").location
        source_socket_local = unreal.Vector(-150, 0, 0)
        # Source actor should be positioned so its socket aligns with target socket
        new_source_location = target_socket_world - source_socket_local
        source.set_actor_location(new_source_location)
        # Verify sockets are aligned
        source_socket_world = self.get_simulated_socket_transform(source, "AttachSocket").location
        distance = (source_socket_world - target_socket_world).size()
        self.assert_equal(distance, 0, tolerance=1.0, message="Socket alignment distance")
        print("✅ Socket-to-socket alignment test passed")
        self.test_results.append(("Socket-to-Socket Alignment", True))

    def test_socket_with_offset(self):
        """Test socket snapping with additional offset."""
        print("Test 4: Socket with Offset")
        # Create base actor
        base = self.create_test_actor_with_socket(
            "Base_Actor",
            location=(3000, 0, 0),
            socket_name="MountSocket",
            socket_offset=(0, 0, 100),  # Socket on top
        )
        # Create attachment with offset
        attachment = self.create_test_actor_with_socket(
            "Attachment_Actor",
            location=(3000, 0, 0),
            socket_name="BaseSocket",
            socket_offset=(0, 0, 0),  # Socket at pivot
        )
        # Apply socket position plus additional offset
        socket_world = self.get_simulated_socket_transform(base, "MountSocket").location
        additional_offset = unreal.Vector(0, 0, 50)  # 50 units higher
        final_position = socket_world + additional_offset
        attachment.set_actor_location(final_position)
        # Verify position
        expected_z = 3000 + 100 + 50  # Base Z + socket offset + additional offset
        actual_z = attachment.get_actor_location().z
        self.assert_equal(actual_z, expected_z, tolerance=1.0, message="Socket with offset Z position")
        print("✅ Socket with offset test passed")
        self.test_results.append(("Socket with Offset", True))

    def test_complex_rotation_alignment(self):
        """Test complex rotation alignment scenarios."""
        print("Test 5: Complex Rotation Alignment")
        # Create actor with complex rotation
        actor1 = self.create_test_actor_with_socket(
            "Complex_Actor1", location=(4000, 0, 0), socket_name="Socket1", socket_offset=(100, 50, 25)
        )
        # Apply complex rotation (roll, pitch, yaw)
        actor1.set_actor_rotation(unreal.Rotator(15, 30, 45))
        # Create second actor to align
        actor2 = self.create_test_actor_with_socket(
            "Complex_Actor2", location=(4500, 0, 0), socket_name="Socket2", socket_offset=(0, 0, 0)
        )
        # Get socket transform and align actor2
        socket_transform = self.get_simulated_socket_transform(actor1, "Socket1")
        actor2.set_actor_location(socket_transform.location)
        actor2.set_actor_rotation(socket_transform.rotation.rotator())
        # Verify rotation matches
        expected_rotation = actor1.get_actor_rotation()
        actual_rotation = actor2.get_actor_rotation()
        self.assert_equal(
            [actual_rotation.roll, actual_rotation.pitch, actual_rotation.yaw],
            [expected_rotation.roll, expected_rotation.pitch, expected_rotation.yaw],
            tolerance=0.1,
            message="Complex rotation alignment",
        )
        print("✅ Complex rotation alignment test passed")
        self.test_results.append(("Complex Rotation Alignment", True))

    def run_all_tests(self):
        """Run all unit tests, print a summary, and always clean up actors."""
        print("\n" + "=" * 50)
        print("Socket Snapping Unit Tests")
        print("=" * 50 + "\n")
        try:
            self.test_basic_socket_math()
            self.test_rotated_socket_math()
            self.test_socket_to_socket_alignment()
            self.test_socket_with_offset()
            self.test_complex_rotation_alignment()
            # Print summary
            print("\n" + "=" * 50)
            print("Test Summary")
            print("=" * 50)
            passed = sum(1 for _, result in self.test_results if result)
            total = len(self.test_results)
            print(f"✅ Passed: {passed}/{total}")
            for test_name, result in self.test_results:
                icon = "✅" if result else "❌"
                print(f"{icon} {test_name}")
            if passed == total:
                print("\n🎉 All tests passed!")
            else:
                print(f"\n⚠️ {total - passed} tests failed")
        except Exception as e:
            print(f"\n❌ Test failed with error: {str(e)}")
            import traceback
            traceback.print_exc()
        finally:
            self.cleanup()
            print("\n🧹 Cleaned up test actors")
# Function to run from UE Python console
def run_socket_tests():
    """Run socket snapping unit tests (entry point for the UE Python console)."""
    SocketSnappingUnitTest().run_all_tests()


if __name__ == "__main__":
    run_socket_tests()
|
import unreal
import os
import time

# Duplicate every selected asset num_copies times, with a cancellable
# progress dialog.
start_time = time.time()

# Instances of unreal classes
editor_util = unreal.EditorUtilityLibrary()
editor_asset_lib = unreal.EditorAssetLibrary()

# Get the selected assets
selected_assets = editor_util.get_selected_assets()
num_assets = len(selected_assets)
num_copies = 3
total_num_copies = num_assets * num_copies
text_label = "Duplicating Assets"
running = True

with unreal.ScopedSlowTask(total_num_copies, text_label) as slow_task:
    slow_task.make_dialog(True)
    # Iterate over the assets
    for asset in selected_assets:
        # Get the asset name and path to be duplicated
        asset_name = asset.get_fname()
        asset_path = editor_asset_lib.get_path_name_for_loaded_asset(asset)
        source_path = os.path.dirname(asset_path)
        for i in range(num_copies):
            # If user pressed the cancel button
            if slow_task.should_cancel():
                running = False
                break
            new_name = f"{asset_name}_{i}"
            dest_path = os.path.join(source_path, new_name)
            duplicate = editor_asset_lib.duplicate_asset(asset_path, dest_path)
            slow_task.enter_progress_frame(1)
            # Bug fix: duplicate_asset returns the new asset on success and
            # None on failure (e.g. the destination already exists); the
            # original warned on success instead of failure.
            if duplicate is None:
                unreal.log_warning(f"Duplicate from {source_path} at {dest_path} already exists.")
        if not running:
            break

end_time = time.time()
# Bug fix: report the copies-per-asset count (num_copies), not num_assets.
unreal.log(f"{num_assets} asset/s duplicated {num_copies} times in {end_time - start_time}.")
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/project/-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unreal
def organize_world_outliner(name_mapping: dict[str, str], type_mapping: dict[unreal.Class, str]):
    """Sort level actors into World Outliner folders.

    Args:
        name_mapping: {name substring -> folder name} — actors whose id name
            contains the substring are moved into the folder.
        type_mapping: {actor class -> folder name} — actors of that class are
            moved into the folder.
    """
    editor_level_lib = unreal.EditorLevelLibrary()
    editor_filter_lib = unreal.EditorFilterLibrary()
    # get all actors and filter down to specific elements
    actors: unreal.Array[unreal.Actor] = editor_level_lib.get_all_level_actors()
    moved = 0

    def _move_into_folder(matched_actors, folder_name: str) -> int:
        # Move every matched actor into folder_name; return the count moved.
        count = 0
        for actor in matched_actors:
            actor_name = actor.get_fname()
            actor.set_folder_path(unreal.Name(folder_name))
            unreal.log(f"Moved {actor_name} into {folder_name}")
            count += 1
        return count

    # Iterate .items() directly instead of .keys() + lookup; the two loops
    # below shared an identical body in the original and are deduplicated.
    for name_sub_string, folder_name in name_mapping.items():
        matched = editor_filter_lib.by_id_name(actors, name_sub_string)
        moved += _move_into_folder(matched, folder_name)
    for actor_type, folder_name in type_mapping.items():
        matched = editor_filter_lib.by_class(actors, actor_type)
        moved += _move_into_folder(matched, folder_name)
    unreal.log(f"Moved {moved} actors into respective folders")

organize_world_outliner({"BP_": "Blueprints"}, {unreal.StaticMeshActor: "StaticMeshActors",
                                                unreal.ReflectionCapture: "ReflectionCaptures"})
|
import unreal
def open_level(level_path):
    """Open *level_path* in the editor if the level asset exists."""
    if unreal.EditorAssetLibrary.does_asset_exist(level_path):
        unreal.EditorLevelLibrary.load_level(level_path)
        print(f"Successfully opened level: {level_path}")
    else:
        print(f"Level path '{level_path}' does not exist.")

# Bug fix: `target_path: str` was a bare annotation with no value assigned,
# so the call below raised NameError. Assign the level to open here.
target_path: str = "/Game/Maps/MyLevel"  # TODO: replace with a real level path
open_level(target_path)
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/project/-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unreal
import os
def remove_unused(instant_delete=True, trash_folder="/project/"):
    """Remove selected assets that have no package referencers.

    When ``instant_delete`` is True the assets are deleted outright;
    otherwise they are renamed (moved) into ``trash_folder``.
    """
    utility_lib = unreal.EditorUtilityLibrary()
    asset_lib = unreal.EditorAssetLibrary()

    selection = utility_lib.get_selected_assets()
    asset_count = len(selection)
    removed = 0

    # Collect the selected assets that nothing references.
    unreferenced = []
    for candidate in selection:
        # get the full path to the to be duplicated asset
        candidate_path = asset_lib.get_path_name_for_loaded_asset(candidate)
        # get a list of references for this asset
        referencers = asset_lib.find_package_referencers_for_asset(candidate_path, True)
        if len(referencers) == 0:
            unreferenced.append(candidate)

    for candidate in unreferenced:
        package_name = candidate.get_package().get_name()
        if instant_delete:
            # instantly delete the assets
            if asset_lib.delete_loaded_asset(candidate):
                removed += 1
            else:
                unreal.log_warning(f"Asset {package_name} could not be deleted")
        else:
            # move the assets to the trash folder
            trashed_path = f"{trash_folder}/{str(package_name)}"
            unreal.log(f"Move {str(candidate)} to {trashed_path}")
            asset_lib.make_directory(os.path.dirname(trashed_path))
            if asset_lib.rename_loaded_asset(candidate, trashed_path):
                removed += 1
            else:
                unreal.log_warning(f"Asset {package_name} could not be moved to Trash")

    unreal.log(f"{removed} of {len(unreferenced)} to be deleted assets, of {asset_count} selected, removed")

remove_unused(instant_delete=False, trash_folder="/project/")
|
import unreal

# instances of unreal classes
editor_level_lib = unreal.EditorLevelLibrary()
editor_filter_lib = unreal.EditorFilterLibrary()

# Destroy every StaticMeshActor in the level whose mesh component has no
# static mesh assigned (e.g. placeholders left behind after asset deletion).
level_actors = editor_level_lib.get_all_level_actors()
static_mesh_actors = editor_filter_lib.by_class(level_actors, unreal.StaticMeshActor)

deleted = 0
for actor in static_mesh_actors:
    actor_name = actor.get_fname()
    # get the static mesh through the static mesh component
    actor_mesh_com = actor.static_mesh_component
    actor_mesh = actor_mesh_com.static_mesh
    # Bug fix: compare against None with identity (`is None`) rather than
    # `!=`, which can invoke an overloaded equality operator.
    if actor_mesh is None:
        actor.destroy_actor()
        deleted += 1
        unreal.log("The Mesh component of actor {} is invalid and has been deleted".format(actor_name))

unreal.log("{} actor has been deleted".format(deleted))
|
import unreal
from pamux_unreal_tools.impl.material_function_impl import MaterialFunctionImpl
from pamux_unreal_tools.base.material_function.material_function_factory_base import MaterialFunctionFactoryBase
class MaterialFunctionFactory(MaterialFunctionFactoryBase):
    """Concrete factory creating ``unreal.MaterialFunction`` assets via
    ``MaterialFunctionFactoryNew`` and wrapping them in ``MaterialFunctionImpl``.
    """
    def __init__(self):
        super().__init__(unreal.MaterialFunction, unreal.MaterialFunctionFactoryNew(), MaterialFunctionImpl)
|
import json
import unreal

# Dump the path name of every live UObject to a JSON file for inspection.
py_lib = unreal.PyToolkitBPLibrary()
objects = py_lib.get_all_objects()
res_list = []
for obj in objects:
    try:
        res_list.append(obj.get_path_name())
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        print("error -> %s" % obj)

path = r"/project/.json"
with open(path, 'w') as f:
    json.dump(res_list, f, indent=4)
# UMaterialInstanceConstant *URedArtToolkitBPLibrary::GetMaterialEditorSourceInstance(UMaterialEditorInstanceConstant *Editor)
# {
# return Editor->SourceInstance;
# }
red_lib = unreal.RedArtToolkitBPLibrary()

def list_material_editor(num=1000):
    """Map open material instances to their transient editor objects.

    Probes MaterialEditorInstanceConstant_0 .. _(num-1) and returns a dict
    of {source material instance: editor instance} for the ones that exist.
    """
    editors = {}
    for index in range(num):
        candidate = unreal.load_object(None, "/project/.MaterialEditorInstanceConstant_%s" % index)
        if not candidate:
            continue
        source = red_lib.get_material_editor_source_instance(candidate)
        if source:
            editors[source] = candidate
    return editors
# --- Scratch/debug session: inspect material editor objects -----------------
edit_asset = red_lib.get_focused_edit_asset()
material_editor = list_material_editor()
editor = material_editor.get(edit_asset)
if editor:
    pass  # TODO: act on the focused material's editor instance

menu = unreal.load_object(None, "/project/.ToolMenus_0:ToolMenu_61")
print(menu.menu_name)
section = unreal.load_object(None, "/project/.Selection_3")
print(section)

material = unreal.load_object(None, '/project/.MI_Wp_Snp_M82_D1_TPS')
obj = material.get_base_material()
print(material)
editor = unreal.MaterialEditorInstanceConstant.cast(material)

assets = red_lib.get_assets_opened_in_editor()
print(assets)
for asset in assets:
    editor = red_lib.get_focus_material_editor_instance(asset)
    print(editor)
    # instance = unreal.MaterialInstance.cast(asset)
    print(asset.parent())
# Bug fix: `print(instance)` referenced a name that is never assigned (its
# defining cast above is commented out) and raised NameError; disabled.
# print(instance)

# Pasted dir() output of a MaterialInstanceConstant, kept for reference (a
# bare list literal in the original -- harmless but converted to a comment):
# ['__class__', '__delattr__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__lt__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '_post_init', '_wrapper_meta_data', 'call_method', 'cast', 'font_parameter_values', 'get_base_material', 'get_class', 'get_default_object', 'get_editor_property', 'get_fname', 'get_full_name', 'get_name', 'get_outer', 'get_outermost', 'get_parameter_info', 'get_path_name', 'get_physical_material', 'get_physical_material_from_map', 'get_physical_material_mask', 'get_scalar_parameter_value', 'get_texture_parameter_value', 'get_typed_outer', 'get_vector_parameter_value', 'get_world', 'modify', 'override_subsurface_profile', 'parent', 'phys_material', 'rename', 'runtime_virtual_texture_parameter_values', 'scalar_parameter_values', 'set_editor_properties', 'set_editor_property', 'set_force_mip_levels_to_be_resident', 'static_class', 'subsurface_profile', 'texture_parameter_values', 'vector_parameter_values']
# section = unreal.load_class(None,"/project/.BP_RedEffectView_C")
# print(section)
# Blueprint'/project/.BP_RedEffectView'
# Pasted dir() output of a Blueprint object, kept for reference:
# ['__class__', '__delattr__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__lt__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '_post_init', '_wrapper_meta_data', 'call_method', 'cast', 'get_class', 'get_default_object', 'get_editor_property', 'get_fname', 'get_full_name', 'get_name', 'get_outer', 'get_outermost', 'get_path_name', 'get_typed_outer', 'get_world', 'modify', 'rename', 'set_editor_properties', 'set_editor_property', 'static_class']
|
import unreal
import logging
logger = logging.getLogger(__name__)
EAL = unreal.EditorAssetLibrary
ATH = unreal.AssetToolsHelpers
AT = ATH.get_asset_tools()
from pamux_unreal_tools.generated.material_expression_wrappers import NamedRerouteDeclaration
from pamux_unreal_tools.base.material.material_base import MaterialBase
from pamux_unreal_tools.base.material_function.material_function_base import MaterialFunctionBase
from pamux_unreal_tools.base.material_expression.material_expression_container_base import MaterialExpressionContainerBase
from pamux_unreal_tools.utils.build_stack import BuildStack
from pamux_unreal_tools.utils.pamux_asset_utils import PamuxAssetUtils
from pamux_unreal_tools.utils.types import *
class MaterialExpressionContainerFactoryBase:
    """Base factory that loads or creates material-expression container
    assets (materials / material functions) and wraps them in a project
    wrapper class (MaterialBase | MaterialFunctionBase).
    """
    # container_wrapper_class: type[MaterialBase | MaterialFunctionBase]
    def __init__(self, asset_class: unreal.Class, asset_factory: unreal.Factory, container_wrapper_class) -> None:
        """Store the unreal asset class/factory and the wrapper type to apply."""
        self.asset_class = asset_class
        self.asset_factory = asset_factory
        self.container_wrapper_class = container_wrapper_class

    # -> MaterialBase | MaterialFunctionBase
    def load(self, builder, container_path: str, virtual_inputs: SocketNames, virtual_outputs: SocketNames) -> MaterialExpressionContainerBase:
        """Load an existing container asset and wrap it."""
        return self.__load_wrapped_asset(builder, container_path, virtual_inputs, virtual_outputs)

    # -> MaterialBase | MaterialFunctionBase
    def loadAndClean(self, builder, container_path, virtual_inputs: SocketNames, virtual_outputs: SocketNames) -> MaterialExpressionContainerBase:
        """Load an existing container and delete all its material expressions."""
        result = self.__load_wrapped_asset(builder, container_path, virtual_inputs, virtual_outputs)
        result.deleteAllMaterialExpressions()
        return result

    # -> MaterialBase | MaterialFunctionBase
    def loadAndCleanOrCreate(self, builder, container_path, virtual_inputs: SocketNames, virtual_outputs: SocketNames) -> MaterialExpressionContainerBase:
        """Load-and-clean the container if it exists, otherwise create it.

        A failed load is logged with its traceback and falls through to
        creating a fresh asset.
        """
        try:
            if EAL.does_asset_exist(container_path):
                return self.loadAndClean(builder, container_path, virtual_inputs, virtual_outputs)
        except Exception:
            # Bug fix: was a bare `except:` (also caught SystemExit and
            # KeyboardInterrupt) and logged no traceback.
            logger.exception(f"Exception loading: {container_path}")
        return self.__create_wrapped_asset(builder, container_path, virtual_inputs, virtual_outputs)

    # -> MaterialBase | MaterialFunctionBase
    def __load_wrapped_asset(self, builder, asset_path: str, virtual_inputs: SocketNames, virtual_outputs: SocketNames) -> MaterialExpressionContainerBase:
        """Load the asset, wrap it, and attach builder and virtual sockets."""
        result = self.__load_and_wrap_asset(asset_path)
        result.builder = builder
        result.virtual_inputs = virtual_inputs
        result.virtual_outputs = virtual_outputs
        return result

    # -> MaterialBase | MaterialFunctionBase
    def __create_wrapped_asset(self, builder, asset_path: str, virtual_inputs: SocketNames, virtual_outputs: SocketNames) -> MaterialExpressionContainerBase:
        """Create the asset, wrap it, and attach builder and virtual sockets."""
        result = self.__create_and_wrap_asset(asset_path)
        result.builder = builder
        result.virtual_inputs = virtual_inputs
        result.virtual_outputs = virtual_outputs
        return result

    # -> MaterialBase | MaterialFunctionBase
    def __load_and_wrap_asset(self, asset_path: str) -> MaterialExpressionContainerBase:
        """Load an existing asset; raise if it cannot be loaded."""
        unrealAsset = EAL.load_asset(asset_path)
        if unrealAsset is None:
            raise Exception(f"Can't load asset: {asset_path}")
        return self.container_wrapper_class(unrealAsset)

    # -> MaterialBase | MaterialFunctionBase
    def __create_and_wrap_asset(self, asset_path: str) -> MaterialExpressionContainerBase:
        """Create a new asset of self.asset_class; raise if creation fails."""
        package_path, asset_name = PamuxAssetUtils.split_asset_path(asset_path)
        unrealAsset = AT.create_asset(asset_name, package_path, self.asset_class, self.asset_factory)
        if unrealAsset is None:
            raise Exception(f"Can't create asset: {asset_path}")
        return self.container_wrapper_class(unrealAsset)
|
# Copyright (c) <2021> Side Effects Software Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The name of Side Effects Software may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY SIDE EFFECTS SOFTWARE "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL SIDE EFFECTS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" An example script that uses the API to instantiate an HDA and then set
the ramp points of a float ramp and a color ramp.
"""
import math
import unreal
_g_wrapper = None
def get_test_hda_path():
    """Return the object path of the ramp-example HDA asset."""
    hda_path = '/project/.ramp_example_1_0'
    return hda_path
def get_test_hda():
    """Load and return the ramp-example HDA asset."""
    hda_path = get_test_hda_path()
    return unreal.load_object(None, hda_path)
def set_parameters(in_wrapper):
    """Configure the float 'heightramp' and the 'colorramp' on the wrapped HDA.

    Bound to on_post_instantiation_delegate by run(); detaches itself so it
    only runs once.
    """
    print('set_parameters')
    # One-shot callback: unbind immediately.
    in_wrapper.on_post_instantiation_delegate.remove_callable(set_parameters)
    # 'heightramp' is a float ramp: size it, then key each (position, value)
    # point individually as an example of the per-point API.
    height_points = [(0.0, 0.1), (0.2, 0.6), (0.4, 1.0), (0.6, 1.4), (0.8, 1.8), (1.0, 2.2)]
    in_wrapper.set_ramp_parameter_num_points('heightramp', len(height_points))
    for point_index, (position, value) in enumerate(height_points):
        in_wrapper.set_float_ramp_parameter_point_value('heightramp', point_index, position, value)
    # The color ramp demonstrates the bulk API: all points set via one array.
    in_wrapper.set_color_ramp_parameter_points('colorramp', (
        unreal.HoudiniPublicAPIColorRampPoint(position=0.0, value=unreal.LinearColor.GRAY),
        unreal.HoudiniPublicAPIColorRampPoint(position=0.5, value=unreal.LinearColor.GREEN),
        unreal.HoudiniPublicAPIColorRampPoint(position=1.0, value=unreal.LinearColor.RED),
    ))
def print_parameters(in_wrapper):
print('print_parameters')
in_wrapper.on_post_processing_delegate.remove_callable(print_parameters)
# Print the ramp points directly
print('heightramp: num points {0}:'.format(in_wrapper.get_ramp_parameter_num_points('heightramp')))
heightramp_data = in_wrapper.get_float_ramp_parameter_points('heightramp')
if not heightramp_data:
print('\tNone')
else:
for idx, point_data in enumerate(heightramp_data):
print('\t\t{0}: position={1:.6f}; value={2:.6f}; interpoloation={3}'.format(
idx,
point_data.position,
point_data.value,
point_data.interpolation
))
print('colorramp: num points {0}:'.format(in_wrapper.get_ramp_parameter_num_points('colorramp')))
colorramp_data = in_wrapper.get_color_ramp_parameter_points('colorramp')
if not colorramp_data:
print('\tNone')
else:
for idx, point_data in enumerate(colorramp_data):
print('\t\t{0}: position={1:.6f}; value={2}; interpoloation={3}'.format(
idx,
point_data.position,
point_data.value,
point_data.interpolation
))
# Print all parameter values
param_tuples = in_wrapper.get_parameter_tuples()
print('parameter tuples: {}'.format(len(param_tuples) if param_tuples else 0))
if param_tuples:
for param_tuple_name, param_tuple in param_tuples.items():
print('parameter tuple name: {}'.format(param_tuple_name))
print('\tbool_values: {}'.format(param_tuple.bool_values))
print('\tfloat_values: {}'.format(param_tuple.float_values))
print('\tint32_values: {}'.format(param_tuple.int32_values))
print('\tstring_values: {}'.format(param_tuple.string_values))
if not param_tuple.float_ramp_points:
print('\tfloat_ramp_points: None')
else:
print('\tfloat_ramp_points:')
for idx, point_data in enumerate(param_tuple.float_ramp_points):
print('\t\t{0}: position={1:.6f}; value={2:.6f}; interpoloation={3}'.format(
idx,
point_data.position,
point_data.value,
point_data.interpolation
))
if not param_tuple.color_ramp_points:
print('\tcolor_ramp_points: None')
else:
print('\tcolor_ramp_points:')
for idx, point_data in enumerate(param_tuple.color_ramp_points):
print('\t\t{0}: position={1:.6f}; value={2}; interpoloation={3}'.format(
idx,
point_data.position,
point_data.value,
point_data.interpolation
))
def run():
    """Instantiate the example HDA and hook up the ramp set/print callbacks."""
    # get the API singleton
    houdini_api = unreal.HoudiniPublicAPIBlueprintLib.get_api()
    # Held in a module global so the wrapper stays alive while delegates fire.
    global _g_wrapper
    # Instantiate the asset (auto-cook enabled by the API) at the identity transform.
    _g_wrapper = api_instantiate = houdini_api.instantiate_asset(get_test_hda(), unreal.Transform())
    # Ramps are set post-instantiation, before the first cook.
    _g_wrapper.on_post_instantiation_delegate.add_callable(set_parameters)
    # Parameter state is printed after the cook and output creation.
    _g_wrapper.on_post_processing_delegate.add_callable(print_parameters)
if __name__ == '__main__':
    # Entry point when executed directly in the Unreal editor's Python console.
    run()
|
import unreal
import json
import sys
import os
import subprocess
import yaml
import scripts.skeletalMeshNameFetcher as skeletalMeshNameFetcher
import scripts.recording.takeRecorder as takeRecorder
import scripts.levelSequence as levelSequence
import scripts.communication.wsCommunicationScript as wsCommunicationScript
import scripts.state.stateManagerScript as stateManagerScript
import scripts.export.exportAndSend as exportAndSend
import scripts.popUp as popUp
import scripts.communication.callback as callback
import scripts.utils.editorFuncs as editorFuncs
import scripts.config.params as paramsmanager
import scripts.utils.extraFuncs as extraFuncs
# Set the parameters from the config file.
# Loaded once at import time; consumed by KeepRunningTakeRecorder and the
# startup statements at the bottom of this script.
params = paramsmanager.Params().get()
class KeepRunningTakeRecorder:
    """
    Utility class for managing continuous recording with the Take Recorder.

    This class provides functionality to start and stop recording using the
    module-level 'stateManager'. It utilizes Slate post-tick callbacks
    to continuously monitor the recording state and take appropriate actions.
    We are also able to replay the last recording.

    We need to hook on the tick function because in Unreal Engine, many API
    calls, especially those related to editor functions, must be called from
    the main thread.

    Methods:
    - start(): Start the take recorder.
    - stop(): Stop the take recorder.
    - tick(delta_time: float): Perform actions based on the current state.
    """
    def __init__(self, tk: takeRecorder.TakeRecorder, file):
        # NOTE(review): 'file' is accepted but never used — confirm before removing.
        print("Initializing KeepRunningTakeRecorder...")
        self.tk = tk
        # Actor names come from the config params loaded at module import.
        self.actorName = params["actor_name"]
        self.actorNameShorthand = params["actor_name_shorthand"]
        self.replayActor = editorFuncs.get_actor_by_name(params["replay_actor_name"])
        # Handle from register_slate_post_tick_callback; required to unregister in stop().
        self.slate_post_tick_handle = None
        # Popup deferred until the RESETTING state completes (see tick()).
        self.resettingPopUpText = None
        self.resettingPopUpTitle = None
    def start(self) -> None:
        """
        Start the take recorder.

        Registers a Slate post-tick callback to execute the 'tick' method
        every editor frame.
        """
        print("Starting Take Recorder...")
        self.slate_post_tick_handle = unreal.register_slate_post_tick_callback(self.tick)
        popUp.show_popup_message("KeepRunningTakeRecorder", f"Tick hook started, keeping double recordings: True")
    def stop(self) -> None:
        """
        Safely stop the take recorder and unregister the Slate post-tick callback.

        Safe to call when the callback was never registered (no-op with a log line).
        """
        if self.slate_post_tick_handle is not None:
            try:
                print("Unregistering Slate post-tick callback...")
                unreal.unregister_slate_post_tick_callback(self.slate_post_tick_handle)
                self.slate_post_tick_handle = None
                print("Slate post-tick callback unregistered successfully.")
            except Exception as e:
                print(f"Error during unregistration: {e}")
                popUp.show_popup_message("KeepRunningTakeRecorder", f"Error during unregistration: {str(e)}")
        else:
            print("Slate post-tick callback was already unregistered or never registered.")
    def tick(self, delta_time: float) -> None:
        """
        Perform actions based on the current recording state (one state per tick).

        If the recording state is "start", begin recording.
        If the recording state is "stop", stop recording.
        If the recording state is "replay_record", replay the last recording.
        Reads/writes the module-level 'stateManager' state machine.
        """
        # When resetting, we are waiting for the take recorder to be ready (making it so he has saved the last recording)
        if stateManager.get_recording_status() == stateManagerScript.Status.RESETTING:
            if self.tk.take_recorder_ready():
                print("Resetting state to idle.")
                stateManager.set_recording_status(stateManagerScript.Status.IDLE)
                # Show any popup message that was deferred until the reset completed.
                if self.resettingPopUpText:
                    popUp.show_popup_message(self.resettingPopUpTitle, self.resettingPopUpText)
                    self.resettingPopUpText = None
                    self.resettingPopUpTitle = None
            return
        if stateManager.get_recording_status() == stateManagerScript.Status.DIE:
            self.stop() # Unregister the callback when stopping
            return
        if stateManager.get_recording_status() == stateManagerScript.Status.START:
            self.tk.start_recording()
            stateManager.set_recording_status(stateManagerScript.Status.RECORDING)
            return
        if stateManager.get_recording_status() == stateManagerScript.Status.STOP:
            # Enter RESETTING first so subsequent ticks wait for the recorder to save.
            stateManager.set_recording_status(stateManagerScript.Status.RESETTING)
            self.tk.stop_recording()
            return
        if stateManager.get_recording_status() == stateManagerScript.Status.REPLAY_RECORD:
            print("TEST: Replaying last recording...")
            # replay_actor = editorFuncs.get_actor_by_name(self.replayActor)
            # # Check if the actor reference was found
            # if replay_actor is None:
            #     print(f"Actor '{self.replayActor}' not found in the current world. Retrying 5 times then Set state to resetting.")
            #     for i in range(5):
            #         replay_actor = editorFuncs.get_actor_by_name(self.replayActor)
            #         if replay_actor is not None:
            #             break
            #     # stateManager.set_recording_status(stateManagerScript.Status.IDLE)
            #     stateManager.set_recording_status(stateManagerScript.Status.RESETTING)
            #     popUp.show_popup_message("replay", f"Actor '{self.replayActor}' not found in the current world. Set state to idle.")
            #     raise ValueError(f"Actor '{self.replayActor}' not found in the current world.")
            print("TEST: FETCHING LAST ANIMATION")
            last_anim, location = self.tk.fetch_last_animation(actor_name=self.actorNameShorthand)
            if last_anim is None:
                # Retry a few times: Unreal may not have saved the take yet.
                for _ in range(5):
                    last_anim, location = self.tk.fetch_last_animation(actor_name=self.actorNameShorthand)
                    if last_anim is not None:
                        break
                # stateManager.set_recording_status(stateManagerScript.Status.IDLE)
                # NOTE(review): this bails out and shows the popup even when a
                # retry succeeded — confirm whether a successful retry should
                # fall through to the replay below instead.
                self.resettingPopUpText = "No last recording found. Set state to idle."
                self.resettingPopUpTitle = "replay"
                stateManager.set_recording_status(stateManagerScript.Status.RESETTING)
                return
            # Using the statemanager, check if the location is the same as the last location (if it is, we are replaying the same recording)
            # This happens due to UnrealEngine not saving the last recording in time, it can take a while. Therefore we show a popup message
            if location == stateManager.get_last_location():
                self.resettingPopUpText = "Replaying the same recording. Set state to idle. Please re-record if you want the export to work."
                self.resettingPopUpTitle = "replay"
                stateManager.set_recording_status(stateManagerScript.Status.RESETTING)
                return
            print(f"Replaying animation at: {location}")
            self.tk.replay_anim(
                replay_actor=self.replayActor,
                anim=last_anim
            )
            stateManager.set_recording_status(stateManagerScript.Status.RESETTING)
            # stateManager.set_recording_status(stateManagerScript.Status.IDLE)
            return
        # Exporting needs to be done through the main thread since UE5.5, the subthread communicating with the websocket therefore
        # communicates with this main thread loop
        if stateManager.get_recording_status() == stateManagerScript.Status.FBX_EXPORT or stateManager.get_recording_status() == stateManagerScript.Status.EXPORT_FBX:
            anim, location = self.tk.fetch_last_animation(actor_name=self.actorNameShorthand)
            stateManager.set_last_location(location)
            if not self.tk.export_animation(location, stateManager.folder, stateManager.get_gloss_name(), actor_name=self.actorNameShorthand):
                stateManager.set_recording_status(stateManagerScript.Status.EXPORT_FAIL)
            else:
                stateManager.set_recording_status(stateManagerScript.Status.EXPORT_SUCCESS)
            # NOTE(review): no 'return' here — control falls through to the
            # TORCH_TOGGLE check below; harmless today but confirm intended.
        if stateManager.get_recording_status() == stateManagerScript.Status.TORCH_TOGGLE:
            print("Toggling torch...")
            extraFuncs.torchToggle(editorFuncs.get_actor_by_name(self.actorName))
            stateManager.set_recording_status(stateManagerScript.Status.IDLE)
            return
        return
print("Starting recorder...")
# Wire up the global state machine, recorder, and tick loop. The module-level
# 'stateManager' is read directly by KeepRunningTakeRecorder.tick.
stateManager = stateManagerScript.StateManager(params["output_dir"])
stateManager.set_folder(params["output_dir"])
stateManager.set_recording_status(stateManagerScript.Status.IDLE)
tk = takeRecorder.TakeRecorder(stateManager)
ktk = KeepRunningTakeRecorder(tk, "")
ktk.start()
# Websocket host comes from config but can be overridden by the first CLI argument.
host = params["websocketServer"]
if len(sys.argv) > 1:
    host = sys.argv[1]
wsCom = wsCommunicationScript.websocketCommunication(host, tk, ktk, params["actor_name"], params["replay_actor_name"])
# wsCom.keep_running_take_recorder = tk
|
from foundation.mcp_app import UnrealMCP
import foundation.utility as utility
import unreal
def register_resource(mcp:UnrealMCP):
    """Register MCP resources on the given UnrealMCP app.

    Currently a no-op: the 'cpp_code' resource below is a disabled draft kept
    for reference until it is finished and re-enabled.
    """
    # @mcp.resource("cpp_code://cpp/{root}/{relative_path}/{plugin_name}")
    # def get_cpp_code(root : str, plugin_name: str, relative_path: str) -> str:
    #     """Get code from the specified path.
    #     The path should be relative to the Project Source directory.
    #     Arguments:
    #         root: The root path of the project. It should be "Project" or "Engine".
    #         plugin_name: Optional, The name of the plugin. if is none, it will be the project name.
    #         relative_path: The relative path to the file from the Project Source directory.
    #     """
    #     try:
    #         unreal.log(f"get_cpp_code: {root}, {plugin_name}, {relative_path}")
    #         path = utility.combine_code_path(root, plugin_name, relative_path)
    #         return path
    #         # with open(path, "r") as file:
    #         #     return file.read()
    #     except FileNotFoundError:
    #         return f"File not found: {path}"
    pass
|
import random
import re
import os
import unreal
from datetime import datetime
from typing import Optional, Callable
def clean_sequencer(level_sequence):
    """Remove every binding from *level_sequence*, leaving the sequencer empty.

    NOTE(review): an earlier revision preserved camera bindings (see history);
    this version removes everything unconditionally.
    """
    for binding in level_sequence.get_bindings():
        binding.remove()
def select_random_asset(assets_path, asset_class=None, predicate:Optional[Callable]=None):
    """Pick a random asset path under *assets_path*.

    Optionally keep only assets whose class name equals *asset_class* and/or
    that satisfy *predicate* (called with the asset path string).
    """
    asset_library = unreal.EditorAssetLibrary()
    candidates = asset_library.list_assets(assets_path)
    if asset_class is not None:
        matching = []
        for candidate in candidates:
            try:
                object_path = os.path.splitext(candidate)[0]
                if unreal.EditorAssetLibrary.find_asset_data(object_path).asset_class_path.asset_name == asset_class:
                    matching.append(candidate)
            except:
                # Assets the registry cannot resolve are skipped.
                continue
        candidates = matching
    if predicate is not None:
        candidates = [candidate for candidate in candidates if predicate(candidate)]
    return random.choice(candidates)
def spawn_actor(asset_path, location=unreal.Vector(0.0, 0.0, 0.0)):
    """Spawn the asset at *asset_path* into the level at *location*.

    The actor is spawned with zero rotation and forced to unit scale.
    """
    asset = unreal.load_asset(asset_path)
    actor = unreal.EditorLevelLibrary.spawn_actor_from_object(
        object_to_use=asset,
        location=location,
        rotation=unreal.Rotator(0, 0, 0))
    actor.set_actor_scale3d(unreal.Vector(1.0, 1.0, 1.0))
    # actor.get_editor_property('render_component').set_editor_property('cast_shadow', self.add_sprite_based_shadow)
    return actor
def add_animation_to_actor(spawnable_actor, animation_path):
    """Add a skeletal-animation track to *spawnable_actor* playing *animation_path*.

    The section range covers the whole clip, assuming 30 fps playback.
    """
    animation_asset = unreal.load_asset(animation_path)
    # Create the track and a section on it, then point the section at the clip.
    animation_track = spawnable_actor.add_track(unreal.MovieSceneSkeletalAnimationTrack)
    animation_section = animation_track.add_section()
    animation_section.params.animation = animation_asset
    # Hard-coded rate; TODO confirm this should come from level_sequence.get_frame_rate().
    frame_rate = 30
    clip_length = animation_asset.get_editor_property('sequence_length')
    final_frame = clip_length * frame_rate.numerator / frame_rate.denominator
    animation_section.set_range(0, final_frame)
def find_relevant_assets(level_sequence):
    """Scan the level for the camera rig rail, its camera, numbered target points, and a skylight.

    Returns (camera_rig_rail, camera, target_points, skylight); any of them may
    be None / empty if not found. target_points maps the numeric suffix of each
    'TargetPoint_<n>' label to its actor.
    """
    target_point_pattern = re.compile("TargetPoint_([0-9]+)")
    target_points = {}
    camera_rig_rail = None
    camera = None
    skylight = None
    actor_subsystem = unreal.get_editor_subsystem(unreal.EditorActorSubsystem)
    for actor in actor_subsystem.get_all_level_actors():
        label = actor.get_actor_label()
        name = actor.get_name()
        match = target_point_pattern.search(label)
        if match is not None:
            target_points[match.group(1)] = actor
        if 'SkyLight' in name:
            skylight = actor
        if 'CineCameraRigRail' in label:
            camera_rig_rail = actor
            # The camera is expected to be attached under the rig rail.
            for child in actor.get_attached_actors():
                if "Camera" in child.get_name():
                    camera = child
    return camera_rig_rail, camera, target_points, skylight
def random_hdri(hdri_backdrop):
    """Assign a randomly chosen HDRI texture to the backdrop's cubemap slot."""
    hdri_path = select_random_asset('/project/')
    hdri_backdrop.set_editor_property('cubemap', unreal.load_asset(hdri_path))
def random_cubemap(skylight):
    """Pick a random TextureCube and apply it to *skylight*, then recapture.

    Does nothing if the chosen cubemap fails to load.
    """
    cubemap_asset = unreal.load_asset(select_random_asset('/project/', asset_class='TextureCube'))
    if cubemap_asset is None:
        return
    light_component = skylight.get_editor_property('light_component')
    light_component.set_editor_property('cubemap', cubemap_asset)
    # Recapture so the new cubemap actually affects the scene lighting.
    light_component.recapture_sky()
def bind_camera_to_level_sequence(level_sequence, camera, start_frame=0, num_frames=0, move_radius=500):
    """Rebind the sequence's Camera Cuts section(s) to *camera*.

    NOTE(review): start_frame, num_frames and move_radius are currently unused;
    they belong to the commented-out random-keyframe experiment below — confirm
    before removing them from the signature.
    """
    # Get the Camera Cuts track manually
    camera_cuts_track = None
    for track in level_sequence.get_master_tracks():
        if track.get_class() == unreal.MovieSceneCameraCutTrack.static_class():
            camera_cuts_track = track
            break
    if camera_cuts_track is None:
        print("No Camera Cuts track found.")
        return
    # Find the section (usually only one for camera cuts)
    sections = camera_cuts_track.get_sections()
    for section in sections:
        if isinstance(section, unreal.MovieSceneCameraCutSection):
            # Replace the camera binding with a possessable for the given camera
            camera_binding = level_sequence.add_possessable(camera)
            camera_binding_id = level_sequence.get_binding_id(camera_binding)
            # Set the new camera binding to the camera cut section
            section.set_camera_binding_id(camera_binding_id)
            print("Camera cut updated to use:", camera.get_name())
    # --- Work-in-progress: random camera movement keyframes (disabled) ---
    # Add Transform Track
    # transform_track = camera_binding.add_track(unreal.MovieScene3DTransformTrack)
    # transform_section = transform_track.add_section()
    # transform_section.set_range(start_frame, start_frame + num_frames)
    # Get transform channels
    # channels = transform_section.get_all_channels()
    # loc_x_channel = channels[0]
    # loc_y_channel = channels[1]
    # loc_z_channel = channels[2]
    # Get original camera location as center point.
    # NOTE(review): currently unused; kept because the disabled keyframe code
    # below depends on it.
    center_location = camera.get_actor_location()
    # Generate random keyframes
    # frames = sorted(random.sample(range(start_frame+1, start_frame+num_frames-1), num_keyframes-2))
    # frames = [start_frame, start_frame+num_frames]
    # Add keyframes
    # for frame in frames:
    #     # Randomize location around the center within a radius
    #     random_location = center_location + unreal.Vector(
    #         random.uniform(-move_radius, move_radius),
    #         random.uniform(-move_radius, move_radius),
    #         random.uniform(-move_radius/2, move_radius/2)
    #     )
    #     frame_number = unreal.FrameNumber(frame)
    #     # Add location keys
    #     loc_x_channel.add_key(frame_number, random_location.x)
    #     loc_y_channel.add_key(frame_number, random_location.y)
    #     loc_z_channel.add_key(frame_number, random_location.z)
def add_actor_to_layer(actor, layer_name="character"):
    """Add *actor* to the named editor layer, creating the layer if needed."""
    layer_subsystem = unreal.get_editor_subsystem(unreal.LayersSubsystem)
    # Add the actor to the specified layer; if it doesn't exist, add_actor_to_layer will create it
    layer_subsystem.add_actor_to_layer(actor, layer_name)
def render(output_path, start_frame=0, num_frames=0, mode="rgb"):
    """Render the RenderSequencer level sequence via Movie Render Queue in PIE.

    Output goes to '{output_path}/{mode}' as PNG frames. On completion the
    finished-callback re-invokes render() to chain passes: 'rgb' -> 'normals'
    -> 'rgb_alpha'. num_frames > 0 enables a custom playback range starting at
    start_frame.
    """
    subsystem = unreal.get_editor_subsystem(unreal.MoviePipelineQueueSubsystem)
    pipelineQueue = subsystem.get_queue()
    # delete all jobs before rendering so only our job is in the queue
    for job in pipelineQueue.get_jobs():
        pipelineQueue.delete_job(job)
    ues = unreal.get_editor_subsystem(unreal.UnrealEditorSubsystem)
    current_world = ues.get_editor_world()
    map_name = current_world.get_path_name()
    job = pipelineQueue.allocate_new_job(unreal.MoviePipelineExecutorJob)
    job.set_editor_property('map', unreal.SoftObjectPath(map_name))
    job.set_editor_property('sequence', unreal.SoftObjectPath('/project/'))
    # This is already set (because we duplicated the main queue) but this is how you set what sequence is rendered for this job
    job.author = "Voia"
    job.job_name = "Synthetic Data"
    # Pick a render-pass configuration asset per mode.
    if mode == 'rgb':
        newConfig = unreal.load_asset("/project/")
    elif mode == 'normals':
        # This is the normals configuration, which will render normals in the alpha channel
        newConfig = unreal.load_asset("/project/")
    else:
        newConfig = unreal.load_asset("/project/")
    # This is how you set the configuration for the job. You can also create a new configuration if you want.
    job.set_configuration(newConfig)
    # Now we can configure the job. Calling find_or_add_setting_by_class is how you add new settings or find the existing one.
    outputSetting = job.get_configuration().find_or_add_setting_by_class(unreal.MoviePipelineOutputSetting)
    outputSetting.output_resolution = unreal.IntPoint(1920, 1080) # HORIZONTAL
    outputSetting.file_name_format = "Image.{render_pass}.{frame_number}"
    outputSetting.flush_disk_writes_per_shot = True # Required for the OnIndividualShotFinishedCallback to get called.
    outputSetting.output_directory = unreal.DirectoryPath(path=f'{output_path}/{mode}')
    # Only restrict the playback range when a positive frame count is given.
    use_custom_playback_range = num_frames > 0
    outputSetting.use_custom_playback_range = use_custom_playback_range
    outputSetting.custom_start_frame = start_frame
    outputSetting.custom_end_frame = start_frame + num_frames
    renderPass = job.get_configuration().find_or_add_setting_by_class(unreal.MoviePipelineDeferredPassBase)
    # remove default JPG output — we emit PNG only
    jpg_settings = job.get_configuration().find_setting_by_class(unreal.MoviePipelineImageSequenceOutput_JPG)
    job.get_configuration().remove_setting(jpg_settings)
    # if cs_709:
    #     set_709_color_space(job)
    png_settings = job.get_configuration().find_or_add_setting_by_class(unreal.MoviePipelineImageSequenceOutput_PNG)
    # png_settings.set_editor_property('write_alpha', False)
    # set render presets for given location
    # set_render_presets(self.rendering_stage, render_presets)
    job.get_configuration().initialize_transient_settings()
    # Error delegate: just log everything we get.
    error_callback = unreal.OnMoviePipelineExecutorErrored()
    def movie_error(pipeline_executor, pipeline_with_error, is_fatal, error_text):
        unreal.log(pipeline_executor)
        unreal.log(pipeline_with_error)
        unreal.log(is_fatal)
        unreal.log(error_text)
    error_callback.add_callable(movie_error)
    # Finished delegate: chains the next render pass.
    # NOTE(review): 'success' is logged but not checked — a failed pass still
    # triggers the next one; confirm intended.
    def movie_finished(pipeline_executor, success):
        unreal.log('movie finished')
        unreal.log(pipeline_executor)
        unreal.log(success)
        # unreal.log(self.rendering_stage)
        # Chain: rgb -> normals -> rgb_alpha (the chain ends after rgb_alpha).
        if mode == 'rgb':
            png_settings.set_editor_property('write_alpha', False)
            render(output_path=output_path, mode="normals")
        elif mode == 'normals':
            png_settings.set_editor_property('write_alpha', True)
            render(output_path=output_path, mode="rgb_alpha")
        # elif mode == 'normals':
        #     unreal.SystemLibrary.quit_editor()
    finished_callback = unreal.OnMoviePipelineExecutorFinished()
    finished_callback.add_callable(movie_finished)
    unreal.log("Starting Executor")
    # executor = subsystem.render_queue_with_executor(unreal.MoviePipelinePIEExecutor)
    # Kept in a module global so it is not garbage collected mid-render.
    global executor
    executor = unreal.MoviePipelinePIEExecutor(subsystem)
    # if executor:
    executor.set_editor_property('on_executor_errored_delegate', error_callback)
    executor.set_editor_property('on_executor_finished_delegate', finished_callback)
    subsystem.render_queue_with_executor_instance(executor)
if __name__ == '__main__':
    # Get the render sequencer and clean it (remove all existing bindings).
    level_sequence = unreal.load_asset('/project/.RenderSequencer')
    clean_sequencer(level_sequence)
    # assuming a single camera rig rail / camera / skylight in the level
    camera_rig_rail, camera, target_points, skylight = find_relevant_assets(level_sequence)
    # Pick a random target point for the camera to track.
    random_key = random.choice(list(target_points.keys()))
    target_point = target_points[random_key]
    # Randomize the skylight cubemap.
    random_cubemap(skylight)
    # Get the CineCameraComponent look-at settings from the CineCameraActor.
    tracking_settings = camera.lookat_tracking_settings
    # Set the actor to look at
    tracking_settings.actor_to_track = target_point
    # Park the rig rail near the target point with a fixed offset.
    location = target_point.get_actor_location()
    camera_rig_rail_offset = unreal.Vector(-100.0, -30.0, 0.0)
    camera_rig_rail.set_actor_location(location + camera_rig_rail_offset, False, False)
    # location = unreal.Vector(-990, -290, 0.0)
    # # character
    # selected_skeletal_mesh_path = select_random_asset('/project/', asset_class='SkeletalMesh')
    # selected_skeletal_mesh_path = select_random_asset('/project/', asset_class='SkeletalMesh')
    # print(selected_skeletal_mesh_path)
    # #exit()
    # actor = spawn_actor(asset_path=selected_skeletal_mesh_path, location=location)
    # spawnable_actor = level_sequence.add_spawnable_from_instance(actor)
    # # animation (Selected random animation)
    # selected_animation_path = select_random_asset('/project/')
    # selected_animation_path = select_random_asset('/project/')
    # add_animation_to_actor(spawnable_actor, animation_path=selected_animation_path)
    # Pick a random character mesh, then a random baked animation from the same
    # directory, excluding the mesh's A-pose clip ('<mesh>_Anim').
    selected_skeletal_mesh_path = select_random_asset('/project/', asset_class='SkeletalMesh')
    a_pose_animation_name = os.path.splitext(selected_skeletal_mesh_path)[-1] + "_Anim"
    def not_a_pose_animation(asset:str):
        # Predicate for select_random_asset: reject the A-pose clip.
        return not asset.endswith(a_pose_animation_name)
    baked_animation_directory_path = os.path.dirname(selected_skeletal_mesh_path)
    selected_animation_path = select_random_asset(baked_animation_directory_path, asset_class="AnimSequence", predicate=not_a_pose_animation)
    print(f"Skeletal Mesh: {selected_skeletal_mesh_path}")
    print(f"Animation: {selected_animation_path}")
    actor = spawn_actor(asset_path=selected_skeletal_mesh_path, location=location)
    add_actor_to_layer(actor, layer_name="character")
    spawnable_actor = level_sequence.add_spawnable_from_instance(actor)
    add_animation_to_actor(spawnable_actor, animation_path=selected_animation_path)
    # delete the original import (keeping only the spawnable actor)
    unreal.get_editor_subsystem(unreal.EditorActorSubsystem).destroy_actor(actor)
    bind_camera_to_level_sequence(level_sequence, camera, start_frame=0, num_frames=150, move_radius=20)
    unreal.log(f"Selected character and animation: {selected_skeletal_mesh_path}, {selected_animation_path}")
    # this will render two passes (first is rgb followed by normals)
    timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    render(output_path="/project/" + timestamp + "\\", mode="rgb")
|
# Copyright (c) <2021> Side Effects Software Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The name of Side Effects Software may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY SIDE EFFECTS SOFTWARE "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL SIDE EFFECTS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import unreal
@unreal.uclass()
class CurveInputExample(unreal.PlacedEditorUtilityBase):
    """Editor utility actor demonstrating geometry and curve inputs via the Houdini Public API."""
    # Use a FProperty to hold the reference to the API wrapper we create in
    # run_curve_input_example, so it stays alive while the delegates fire.
    _asset_wrapper = unreal.uproperty(unreal.HoudiniPublicAPIAssetWrapper)

    @unreal.ufunction(meta=dict(BlueprintCallable=True, CallInEditor=True))
    def run_curve_input_example(self):
        """Instantiate the copy_to_curve HDA and bind the example delegates."""
        # Get the API instance
        api = unreal.HoudiniPublicAPIBlueprintLib.get_api()
        # Ensure we have a running session
        if not api.is_session_valid():
            api.create_session()
        # Load our HDA uasset
        example_hda = unreal.load_object(None, '/project/.copy_to_curve_1_0')
        # Create an API wrapper instance for instantiating the HDA and interacting with it
        wrapper = api.instantiate_asset(example_hda, instantiate_at=unreal.Transform())
        if wrapper:
            # Pre-instantiation is the earliest point where we can set parameter values
            wrapper.on_pre_instantiation_delegate.add_function(self, '_set_initial_parameter_values')
            # Jumping ahead a bit: we also want to configure inputs, but inputs are only available after instantiation
            wrapper.on_post_instantiation_delegate.add_function(self, '_set_inputs')
            # Jumping ahead a bit: we also want to print the outputs after the node has cooked and the plug-in has processed the output
            wrapper.on_post_processing_delegate.add_function(self, '_print_outputs')
        self.set_editor_property('_asset_wrapper', wrapper)

    @unreal.ufunction(params=[unreal.HoudiniPublicAPIAssetWrapper], meta=dict(CallInEditor=True))
    def _set_initial_parameter_values(self, in_wrapper):
        """ Set our initial parameter values: disable upvectorstart and set the scale to 0.2. """
        # Uncheck the upvectoratstart parameter
        in_wrapper.set_bool_parameter_value('upvectoratstart', False)
        # Set the scale to 0.2
        in_wrapper.set_float_parameter_value('scale', 0.2)
        # Since we are done with setting the initial values, we can unbind from the delegate
        in_wrapper.on_pre_instantiation_delegate.remove_function(self, '_set_initial_parameter_values')

    @unreal.ufunction(params=[unreal.HoudiniPublicAPIAssetWrapper], meta=dict(CallInEditor=True))
    def _set_inputs(self, in_wrapper):
        """ Configure our inputs: input 0 is a cube and input 1 a helix. """
        # Create an empty geometry input
        geo_input = in_wrapper.create_empty_input(unreal.HoudiniPublicAPIGeoInput)
        # Load the cube static mesh asset
        cube = unreal.load_object(None, '/project/.Cube')
        # Set the input object array for our geometry input, in this case containing only the cube
        geo_input.set_input_objects((cube, ))
        # Set the input on the instantiated HDA via the wrapper
        in_wrapper.set_input_at_index(0, geo_input)
        # Create a curve input
        curve_input = in_wrapper.create_empty_input(unreal.HoudiniPublicAPICurveInput)
        # Create a curve wrapper/helper
        curve_object = unreal.HoudiniPublicAPICurveInputObject(curve_input)
        # Make it a Nurbs curve
        curve_object.set_curve_type(unreal.HoudiniPublicAPICurveType.NURBS)
        # Set the points of the curve, for this example we create a helix
        # consisting of 100 points (one revolution every 20 points, radius 100)
        curve_points = []
        for i in range(100):
            t = i / 20.0 * math.pi * 2.0
            x = 100.0 * math.cos(t)
            y = 100.0 * math.sin(t)
            z = i
            curve_points.append(unreal.Transform([x, y, z], [0, 0, 0], [1, 1, 1]))
        curve_object.set_curve_points(curve_points)
        # Set the curve wrapper as an input object
        curve_input.set_input_objects((curve_object, ))
        # Copy the input data to the HDA as node input 1
        in_wrapper.set_input_at_index(1, curve_input)
        # unbind from the delegate, since we are done with setting inputs
        in_wrapper.on_post_instantiation_delegate.remove_function(self, '_set_inputs')

    @unreal.ufunction(params=[unreal.HoudiniPublicAPIAssetWrapper], meta=dict(CallInEditor=True))
    def _print_outputs(self, in_wrapper):
        """ Print the outputs that were generated by the HDA (after a cook) """
        num_outputs = in_wrapper.get_num_outputs()
        print('num_outputs: {}'.format(num_outputs))
        if num_outputs > 0:
            for output_idx in range(num_outputs):
                identifiers = in_wrapper.get_output_identifiers_at(output_idx)
                print('\toutput index: {}'.format(output_idx))
                print('\toutput type: {}'.format(in_wrapper.get_output_type_at(output_idx)))
                print('\tnum_output_objects: {}'.format(len(identifiers)))
                if identifiers:
                    for identifier in identifiers:
                        output_object = in_wrapper.get_output_object_at(output_idx, identifier)
                        output_component = in_wrapper.get_output_component_at(output_idx, identifier)
                        is_proxy = in_wrapper.is_output_current_proxy_at(output_idx, identifier)
                        print('\t\tidentifier: {}'.format(identifier))
                        print('\t\toutput_object: {}'.format(output_object.get_name() if output_object else 'None'))
                        print('\t\toutput_component: {}'.format(output_component.get_name() if output_component else 'None'))
                        print('\t\tis_proxy: {}'.format(is_proxy))
                    print('')
def run():
    """Spawn a CurveInputExample actor at the origin and start the example."""
    example_actor = unreal.EditorLevelLibrary.spawn_actor_from_class(
        CurveInputExample.static_class(), unreal.Vector.ZERO, unreal.Rotator())
    example_actor.run_curve_input_example()
if __name__ == '__main__':
    # Entry point when executed directly in the Unreal editor's Python console.
    run()
|
import unreal

# NA number used to build the content directories below.
num = 39

# Default directories.
Basepath = '/project/' + str(num) + '/'
BSAssetPath = Basepath + '/project/'
AnimAssetPath = Basepath + '/Animation/'

# Blend-space asset names; index 0 is the peaceful idle/run blend space.
bsNames = ["IdleRun_BS_Peaceful", "IdleRun_BS_Battle", "Down_BS", "Groggy_BS", "LockOn_BS", "Airborne_BS"]

# Value set.
runSpeed = 400
walkSpeed = 150
defaultSamplingVector = unreal.Vector(0.0, 0.0, 0.0)
defaultSampler = unreal.BlendSample()
defaultSampler.set_editor_property("sample_value", defaultSamplingVector)
Anims = unreal.EditorAssetLibrary.list_assets(AnimAssetPath)

# BS samplers setting.
# FIX: the peaceful blend space is bsNames[0]; the original indexed bsNames[1]
# ("IdleRun_BS_Battle") despite every variable being named *_peace.
BS_idle_peace_path = BSAssetPath + bsNames[0]
BS_idle_peace = unreal.EditorAssetLibrary.load_asset(BS_idle_peace_path)
# FIX: this call was left unterminated in the original (syntax error); assign
# the loaded blend space as the sampler's animation.
defaultSampler.set_editor_property("animation", BS_idle_peace)

# Scan the animation asset list for idle animations.
i = 0
while i < len(Anims):
    if Anims[i].find('idle') != -1:
        print('yes')
    i += 1  # FIX: the original never incremented i, looping forever.
|
#! /project/ python
# -*- coding: utf-8 -*-
"""
Initialization module for mca-package
"""
# mca python imports
# software specific imports
import unreal
# mca python imports
def get_content_browser_selection():
    """Return the assets currently selected in the Content Browser, printing each one."""
    selected_assets = unreal.EditorUtilityLibrary().get_selected_assets()
    for asset in selected_assets:
        print(asset)
    return selected_assets
|
import unreal
import csv
import os

contentPath = 'D:/project/'
filename = "python/project/.csv"

print('Project Path:', contentPath)

# Each CSV row is (old_substring, new_substring) for renaming files.
data = []
with open(filename, 'r') as file:
    csv_reader = csv.reader(file)
    next(csv_reader)  # Skip the header row
    for row in csv_reader:
        data.append(row)

from_folder = contentPath + "/from"
to_folder = contentPath + "/to"
file_names = os.listdir(from_folder)

# FIX: the original rename loop ran after the CSV loop and reused the leftover
# loop variable `row`, so only the *last* CSV mapping was ever applied.
# Apply every mapping row to every file instead.
for row in data:
    old_sub, new_sub = row[0], row[1]
    for file_name in file_names:
        if file_name.find(old_sub) == -1:
            continue
        new_file_name = file_name.replace(old_sub, new_sub)
        new_file_path = os.path.join(to_folder, new_file_name)
        file_path = os.path.join(from_folder, file_name)
        # Don't overwrite an existing destination file.
        if not os.path.exists(new_file_path):
            os.rename(file_path, new_file_path)
|
import json
import os
import re
import time
from collections.abc import Iterable
from pathlib import Path
from subprocess import run
import unreal
# Debug aid: when non-empty, only objects whose names match these are imported.
SELECTIVE_OBJECTS = []
projectpath = unreal.Paths.project_plugins_dir()
newpath = projectpath + 'Uiana/project/.json'
# FIX: the original left the file handle open for the process lifetime; a
# context manager closes it as soon as the JSON is parsed.
with open(newpath) as f:
    JsonMapTypeData = json.load(f)
FILEBROWSER_PATH = os.path.join(os.getenv('WINDIR'), 'explorer.exe')
# -------------------------- #
def clear_level():
    """Destroy every actor in the currently loaded level."""
    for actor in unreal.EditorLevelLibrary.get_all_level_actors():
        unreal.EditorLevelLibrary.destroy_actor(actor)
def ReturnBPLoop(data, name):
    """Return the first entry in data whose "Name" equals name (None if absent)."""
    matches = (entry for entry in data if entry["Name"] == name)
    return next(matches, None)
def return_parent(parent_name):
    """Resolve a "<Type> <ShaderName>" string to a usable parent-material name.

    Falls back to "BaseEnv_MAT_V4" when the named shader cannot be loaded.
    """
    shader_name = parent_name[parent_name.rfind(' ') + 1:]
    if import_shader(shader_name):
        return shader_name
    return "BaseEnv_MAT_V4"
def reduce_bp_json(BigData):
    """Condense raw blueprint JSON into the pieces the importer needs.

    Returns a dict with:
        "Nodes": SCS node entries, each augmented with a "CompProps" key
            holding the matching component template's Properties (or None);
        "SceneRoot": default scene-root node names;
        "GameObjects": entries named "GameObjectMesh";
        "ChildNodes": short names of every node's children.
    Entries without a "Properties" key are skipped.
    """
    FullJson = {}
    newjson = []
    sceneroot = []
    ChildNodes = []
    GameObjects = []
    for fnc in BigData:
        fName = fnc["Name"]
        fType = fnc["Type"]
        if "Properties" not in fnc:
            continue
        FProps = fnc["Properties"]
        if fType == "SimpleConstructionScript":
            # "Outer:Name" -> keep just the trailing name.
            SceneRoot = FProps["DefaultSceneRootNode"]["ObjectName"]
            sceneroot.append(SceneRoot[SceneRoot.rfind(':') + 1:])
        if "Node" in fName:
            Name = FProps["ComponentTemplate"]["ObjectName"]
            ActualName = Name[Name.rfind(':') + 1:]
            Component = ReturnBPLoop(BigData, ActualName)
            # FIX: ReturnBPLoop returns None when the template is missing;
            # the original then crashed on `"Properties" in None`.
            FProps["CompProps"] = Component["Properties"] if Component and "Properties" in Component else None
            newjson.append(fnc)
        if has_key("ChildNodes", FProps):
            for CN in FProps["ChildNodes"]:
                ChildObjectName = CN["ObjectName"]
                ChildNodes.append(ChildObjectName[ChildObjectName.rfind('.') + 1:])
        if fName == "GameObjectMesh":
            GameObjects.append(fnc)
    FullJson["Nodes"] = newjson
    FullJson["SceneRoot"] = sceneroot
    FullJson["GameObjects"] = GameObjects
    FullJson["ChildNodes"] = ChildNodes
    return FullJson
def check_export(settings):
    """Return True when the map's export needs to be redone.

    True when an exported.yo marker exists but was written by a different
    game version (or a re-export is forced); False when the marker matches
    the current version, or when no marker exists at all.
    NOTE(review): returning False for a missing marker mirrors the original
    code — confirm callers treat "never exported" separately.
    """
    exp_path = settings.selected_map.folder_path.joinpath("exported.yo")
    if exp_path.exists():
        # FIX: close the marker file (the original leaked the handle).
        with open(exp_path) as marker:
            exp_data = json.load(marker)
        exported_version = exp_data[0]
        # FIX: the original returned True on *both* branches, making the
        # version comparison dead code; only re-export on mismatch/force.
        return settings.val_version != exported_version or settings.dev_force_reexport
    return False
def write_export_file():
    """Serialize the current VALORANT version as the exported.yo JSON payload."""
    payload = [get_valorant_version()]
    return json.dumps(payload, indent=4)
def get_cubemap_texture(Seting):
    """Load the TextureCube asset referenced by Seting["ObjectName"]."""
    cube_name = Seting["ObjectName"].replace("TextureCube ", "")
    asset_path = f'/project/{cube_name}.{cube_name}'
    return unreal.load_asset(asset_path)
def get_valorant_version():
    """Read the installed VALORANT build version from the Riot metadata file.

    Returns the version token (first dotted component of the last '/'-part of
    the file's first line), or None when the metadata file does not exist.
    """
    val_file = "/project/ Games\\Metadata\\valorant.live\\valorant.live.ok"
    if not os.path.exists(val_file):
        return None
    with open(val_file, "r") as fh:
        first_line = fh.read().split('\n')[0]
    return first_line.split('/')[-1].split('.')[0]
def get_ies_texture(setting):
    """Load the IES light-profile texture referenced by setting["ObjectName"]."""
    texture_name = return_formatted_string(setting["ObjectName"], "_")
    asset_path = f'/project/{texture_name}.{texture_name}'
    return unreal.load_asset(asset_path)
def set_material_vector_value(Mat, ParamName, Value):
    """Set a vector parameter on a material instance, then refresh the instance."""
    mel = unreal.MaterialEditingLibrary
    mel.set_material_instance_vector_parameter_value(Mat, ParamName, Value)
    mel.update_material_instance(Mat)
def set_material_scalar_value(Mat, ParamName, Value):
    """Set a scalar parameter on a material instance, then refresh the instance."""
    mel = unreal.MaterialEditingLibrary
    mel.set_material_instance_scalar_parameter_value(Mat, ParamName, Value)
    mel.update_material_instance(Mat)
def GetReadableUMapType(mapname):
    """Return the StreamingType string for mapname from the map-type JSON (None if unknown)."""
    for entry in JsonMapTypeData:
        if mapname == entry["Name"]:
            return entry["StreamingType"]
# Substrings of object names that must never be imported; matched
# case-insensitively by is_blacklisted().
BLACKLIST = [
    "navmesh",
    "_breakable",
    "_collision",
    "windstreaks_plane",
    "sm_port_snowflakes_boundmesh",
    "M_Pitt_Caustics_Box",
    "box_for_volumes",
    "BombsiteMarker_0_BombsiteA_Glow",
    "BombsiteMarker_0_BombsiteB_Glow",
    "_col",
    "M_Pitt_Lamps_Glow",
    "SM_Pitt_Water_Lid",
    "Bombsite_0_ASiteSide",
    # FIX: a missing comma here fused this entry with "For_Volumes" into the
    # single string "Bombsite_0_BSiteSideFor_Volumes", so neither name was
    # actually blocked.
    "Bombsite_0_BSiteSide",
    "For_Volumes",
    "Foxtrot_ASite_Plane_DU",
    "Foxtrot_ASite_Side_DU",
    "BombsiteMarker_0_BombsiteA_Glow",
    "BombsiteMarker_0_BombsiteB_Glow",
    "DirtSkirt",
    "Tech_0_RebelSupplyCargoTarpLargeCollision",
]
def get_umap_type(mapname):
    """Return the unreal class object named by mapname's StreamingType.

    FIX: uses getattr instead of eval — same attribute lookup on the unreal
    module, without executing arbitrary strings from the JSON file.
    Returns None when the map name is unknown.
    """
    for entry in JsonMapTypeData:
        if mapname == entry["Name"]:
            return getattr(unreal, entry["StreamingType"])
def import_shader(Shader):
    """Load a shader asset by name from the project shader folder (None when missing)."""
    asset_path = f'/project/{Shader}'
    return unreal.load_asset(asset_path)
def return_object_name(name):
    """Strip everything up to and including the last space (e.g. "StaticMesh Foo" -> "Foo")."""
    return name[name.rfind(' ') + 1:]
def mesh_to_asset(Mesh, Type, ActualType):
    """Resolve a umap mesh reference to a loaded Unreal asset.

    Args:
        Mesh: dict with an "ObjectName" entry (e.g. "StaticMesh Foo"), or None.
        Type: type prefix to strip from the object name.
        ActualType: content subfolder the asset lives in.

    Returns:
        The loaded asset, or None when Mesh is None.
    """
    # FIX: identity comparison for None; also dropped the unused local
    # `typestring` the original computed and never read.
    if Mesh is None:
        return None
    Name = Mesh["ObjectName"]
    NewName = Name.replace(f'{Type}', "")
    PathToGo = f'/project/{ActualType}/{NewName}'
    return unreal.load_asset(PathToGo)
def path_convert(path: str) -> str:
    """Map a ShooterGame content path onto the /Game mount.

    "ShooterGame" becomes "Game" and a "Content" second segment is blanked
    (leaving a doubled backslash, matching the original exporter's output).
    """
    first, second, remainder = path.split("\\", 2)
    if first == "ShooterGame":
        first = "Game"
    if second == "Content":
        second = ""
    return "\\".join((first, second, remainder))
def get_scene_transform(prop):
    """Build an unreal.Transform from SceneAttachRelative* keys in prop.

    Missing components default to identity (zero location/rotation, unit
    scale).
    """
    LocationUnreal = unreal.Vector(0.0, 0.0, 0.0)
    ScaleUnreal = unreal.Vector(1.0, 1.0, 1.0)
    RotationUnreal = unreal.Rotator(0.0, 0.0, 0.0)
    if has_key("SceneAttachRelativeLocation", prop):
        loc = prop["SceneAttachRelativeLocation"]
        LocationUnreal = unreal.Vector(loc["X"], loc["Y"], loc["Z"])
    # FIX: the rotation and scale guards below originally re-tested
    # "SceneAttachRelativeLocation" (copy-paste), so a prop without a
    # location key but with rotation/scale keys lost them — and one with a
    # location but no rotation/scale raised KeyError.
    if has_key("SceneAttachRelativeRotation", prop):
        rot = prop["SceneAttachRelativeRotation"]
        RotationUnreal = unreal.Rotator(rot["Roll"], rot["Pitch"], rot["Yaw"])
    if has_key("SceneAttachRelativeScale3D", prop):
        scale = prop["SceneAttachRelativeScale3D"]
        ScaleUnreal = unreal.Vector(scale["X"], scale["Y"], scale["Z"])
    return unreal.Transform(LocationUnreal, RotationUnreal, ScaleUnreal)
def get_transform(Prop):
    """Build an unreal.Transform from a component's Properties dict.

    Two layouts are supported:
      * plain components: RelativeLocation / RelativeRotation (euler) /
        RelativeScale3D at the top level of Props;
      * instanced components: a "TransformData" sub-dict holding
        Translation / Rotation (quaternion) / Scale3D.
    For the instanced case the quaternion is applied to the transform after
    construction, since unreal.Transform itself takes a Rotator.
    """
    TransformData = None
    bIsInstanced = False
    Props = Prop
    Quat = unreal.Quat()
    if has_key("TransformData", Props):
        TransformData = Props["TransformData"]
        bIsInstanced = True
    # has_key tolerates a None container, so the TransformData lookups below
    # are safe for non-instanced components.
    if has_key("RelativeLocation", Props) or has_key("OffsetLocation", Props) or has_key("Translation", TransformData) :
        if bIsInstanced:
            Location = TransformData["Translation"]
        else:
            Location = Props["RelativeLocation"]
        LocationUnreal = unreal.Vector(Location["X"], Location["Y"], Location["Z"])
    else:
        LocationUnreal = unreal.Vector(0.0, 0.0, 0.0)
    if has_key("RelativeScale3D", Props) or has_key("Scale3D", TransformData):
        if bIsInstanced:
            Scale = TransformData["Scale3D"]
            ScaleUnreal = unreal.Vector(Scale["X"], Scale["Y"], Scale["Z"])
        else:
            Scale = Props["RelativeScale3D"]
            ScaleUnreal = unreal.Vector(Scale["X"], Scale["Y"], Scale["Z"])
    else:
        ScaleUnreal = unreal.Vector(1.0, 1.0, 1.0)
    if has_key("RelativeRotation", Props) or has_key("Rotation", TransformData):
        if bIsInstanced:
            Rotation = TransformData["Rotation"]
            Quat = unreal.Quat(Rotation["X"], Rotation["Y"], Rotation["Z"], Rotation["W"])
            # Placeholder rotator; the real rotation is set from Quat below.
            RotationUnreal = unreal.Rotator(0.0, 0.0, 0.0)
        else:
            Rotation = Props["RelativeRotation"]
            RotationUnreal = unreal.Rotator(Rotation["Roll"], Rotation["Pitch"], Rotation["Yaw"])
    else:
        RotationUnreal = unreal.Rotator(0.0, 0.0, 0.0)
    Trans = unreal.Transform(LocationUnreal, RotationUnreal, ScaleUnreal)
    if bIsInstanced:
        # Overwrite the placeholder rotator with the exact quaternion.
        Trans.set_editor_property("rotation", Quat)
    return Trans
def has_key(key, array):
    """Return True when array is a non-None container that holds key."""
    return array is not None and key in array
def GetClassName(self):
    """Return the class name of the given object (module-level helper, not a method)."""
    return self.__class__.__name__
def return_formatted_string(string, prefix):
    """Return the substring after the last occurrence of prefix.

    When prefix is absent, rfind's -1 plus one yields 0, i.e. the whole
    string is returned.
    """
    return string[string.rfind(prefix) + 1:]
def has_transform(prop):
    """Return a Transform parsed from prop's relative-transform keys, or False when none exist."""
    transform_keys = (
        "RelativeLocation",
        "SceneAttachRelativeLocation",
        "SceneAttachRelativeRotation",
        "SceneAttachRelativeScale3D",
        "RelativeRotation",
        "RelativeScale3D",
    )
    if any(has_key(key, prop) for key in transform_keys):
        return get_transform(prop)
    return False
def return_python_unreal_enum(value):
    """Convert a CamelCase Unreal enum name to Python's UPPER_SNAKE form."""
    snake = re.sub(r'([a-z])([A-Z])', r'\1_\2', value)
    # Drop a single leading underscore before uppercasing.
    start = 1 if snake[0] == "_" else 0
    return snake[start:].upper()
def filter_objects(umap_DATA, current_umap_name) -> list:
    """Select the umap objects to import.

    When SELECTIVE_OBJECTS is non-empty (debug mode), keep only entries whose
    mesh path / outer matches one of those names; then drop anything whose
    name is blacklisted.
    """
    if SELECTIVE_OBJECTS:
        candidates = []
        for wanted in SELECTIVE_OBJECTS:
            for entry in umap_DATA:
                kind = get_object_type(entry)
                if kind == "mesh":
                    mesh_path = entry["Properties"]["StaticMesh"]["ObjectPath"]
                    if wanted in mesh_path:
                        entry["Name"] = mesh_path
                        candidates.append(entry)
                elif kind in ("decal", "light"):
                    # Decals and lights match on their Outer.
                    if wanted in entry["Outer"]:
                        entry["Name"] = entry["Outer"]
                        candidates.append(entry)
    else:
        candidates = umap_DATA
    kept = []
    # Check for blacklisted items
    for entry in candidates:
        objname = get_obj_name(data=entry, mat=False)
        if not objname:
            continue
        if not is_blacklisted(objname.lower()):
            kept.append(entry)
    return kept
def is_blacklisted(object_name: str) -> bool:
    """True when any BLACKLIST entry occurs (case-insensitively) in object_name."""
    lowered = object_name.lower()
    return any(blocked.lower() in lowered for blocked in BLACKLIST)
def get_obj_name(data: dict, mat: bool):
    """Extract the short asset name for a umap JSON entry.

    Args:
        data: umap JSON entry.
        mat: when True, read the path straight from data["ObjectPath"]
            (material entries); otherwise resolve via Properties.StaticMesh,
            falling back to "Outer".

    Returns:
        The stem of the referenced path; None when unresolvable; or the
        literal string "None" when the entry has no Properties at all.
        NOTE(review): "None" (truthy) and None (falsy) are treated
        differently by callers such as filter_objects — confirm this
        asymmetry is intentional before unifying.
    """
    if mat:
        s = data["ObjectPath"]
    else:
        if has_key("Properties", data) == False:
            return "None"
        if "StaticMesh" in data["Properties"]:
            d = data["Properties"]["StaticMesh"]
            if not d:
                # StaticMesh key present but null.
                return None
            s = d["ObjectPath"]
        else:
            if not has_key("Outer", data):
                return None
            s = data["Outer"]
    k = get_name(s)
    return k
def get_name(s: str) -> str:
    """Return the final path component of s without its extension."""
    stem = Path(s).stem
    return stem
def cast(object_to_cast=None, object_class=None):
    """Best-effort unreal cast: object_class.cast(object_to_cast), or None on any failure."""
    try:
        result = object_class.cast(object_to_cast)
    except:  # deliberately broad: any failure (including missing .cast) means "not castable"
        return None
    return result
def open_folder(path):
    """
    Open a file explorer to a path
    (NOTE(review): this helper is defined twice in this module with an
    identical body; the later definition wins at import time.)
    :param path: path to folder
    :return:
    """
    path = os.path.normpath(path)
    # Only directories are opened; invalid paths are silently ignored.
    if os.path.isdir(path):
        run([FILEBROWSER_PATH, path])
def get_files(path: str, extension: str = "") -> list:
    """
    Get all files in a directory
    :param path: path to directory
    :param extension: extension of files to get (substring match anywhere
        in the entry name; the default "" matches everything)
    :return: list of Path objects
    """
    return [Path(os.path.join(path, entry))
            for entry in os.listdir(path)
            if extension in entry]
def open_folder(path):
    """
    Open a file explorer to a path
    (NOTE(review): duplicate of the identical open_folder defined earlier in
    this module; this later definition is the one that takes effect.)
    :param path: path to folder
    :return:
    """
    path = os.path.normpath(path)
    # Only directories are opened; invalid paths are silently ignored.
    if os.path.isdir(path):
        run([FILEBROWSER_PATH, path])
def save_list(filepath: Path, lines: list):
    """
    Save a list to a file, flattening nested lists and removing duplicates
    while preserving first-seen order.
    :param filepath: path to file
    :param lines: list of lines (possibly nested)
    :return: the path written, as a string
    """
    flat = list(flatten_list(lines))
    unique = list(dict.fromkeys(flat))
    path_str = str(filepath)
    with open(path_str, 'w') as out:
        out.write('\n'.join(unique))
    return path_str
def save_json(p: str, d):
    """
    Save a dictionary to a json file (4-space indent)
    :param p: path to file
    :param d: dictionary
    :return:
    """
    with open(p, 'w') as jsonfile:
        jsonfile.write(json.dumps(d, indent=4))
def read_json(p: str) -> dict:
    """
    Read a json file and return a dictionary
    :param p: path to file
    :return: parsed JSON content
    """
    with open(p) as json_file:
        contents = json_file.read()
    return json.loads(contents)
def get_mat(decal_dict):
    """Load the override material referenced by a decal entry's ObjectPath."""
    mat_name = get_obj_name(data=decal_dict, mat=True)
    asset_path = f'/project/{mat_name}.{mat_name}'
    return unreal.load_asset(asset_path)
def get_scene_parent(obj, OuterName, umapfile):
    """Find the root component obj attaches under and return its transform.

    Scans umapfile for a SceneComponent / BrushComponent /
    StaticMeshComponent whose Outer matches OuterName and which has no
    AttachParent itself (i.e. is a root component), then returns
    has_transform(...) for it — an unreal.Transform, or False when that
    component carries no relative-transform keys. Returns None when no
    matching component is found.
    """
    types = ["SceneComponent", "BrushComponent", "StaticMeshComponent"]
    # Actors parented straight to the level use their own name as the outer.
    if OuterName == 'PersistentLevel':
        OuterName = obj["Name"]
    for j in umapfile:
        tipo = j["Type"]
        if not has_key("Outer", j):
            continue
        outer = j["Outer"]
        # Normalize the candidate's outer the same way as OuterName above.
        if outer == "PersistentLevel":
            outer = j["Name"]
        # print(f'OuterName trying to find is {OuterName} and current outer is {outer} // also tipo is {tipo}')
        if has_key("Properties", j) == False:
            continue
        KeyOuter = has_key("AttachParent", j["Properties"])
        if outer == OuterName and tipo in types and KeyOuter == False:
            return has_transform(j["Properties"])
    # exit()
def set_unreal_prop(self, prop_name, prop_value):
    """Best-effort editor-property setter: failures are logged, never raised.

    FIX: narrowed the original bare ``except`` to ``Exception`` so
    KeyboardInterrupt/SystemExit still propagate.
    """
    try:
        self.set_editor_property(prop_name, prop_value)
    except Exception:
        print(f'UianaPropLOG: Error setting {prop_name} to {prop_value}')
def shorten_path(file_path, length) -> str:
    """
    Shorten a path to a given length
    :param file_path: path to shorten
    :param length: number of trailing path components to keep
    :return: shortened path prefixed with "..\\"
    """
    # NOTE(review): the literal "..\{" relies on Python preserving the
    # invalid escape "\{" as a backslash before the joined tail, and the
    # backslash prefix is Windows-style while os.sep is platform-dependent —
    # confirm this is only used for display output on Windows.
    return f"..\{os.sep.join(file_path.split(os.sep)[-length:])}"
def flatten_list(collection):
    """
    Flatten a list of lists
    (recursively yields leaves of nested iterables; strings count as leaves)
    :param collection: list of lists
    :return: generator of leaf values
    """
    for element in collection:
        if not isinstance(element, Iterable) or isinstance(element, str):
            yield element
        else:
            yield from flatten_list(element)
def create_folders(self):
    """Create a directory for every attribute of self whose name contains "path"."""
    for attr_name, attr_value in self.__dict__.items():
        if "path" not in attr_name:
            continue
        folder = Path(attr_value)
        if not folder.exists():
            print(f"Creating folder {folder}")
            folder.mkdir(parents=True)
# ANCHOR: Classes
# -------------------------- #
class Settings:
    """Importer configuration assembled from the UESet editor settings object.

    Path-like attributes come from UESet as pathlib.Path values (joinpath is
    used throughout); the trailing create_folders(self) call creates any
    *path* attribute directories missing on disk.
    """
    def __init__(self, UESet):
        # AES key used by the extraction tools to decrypt game paks.
        self.aes = UESet.vAesKey
        self.texture_format = ".png"
        # TODO: derive the script root from this file's location instead of
        # relying on the plugin-path setting.
        self.script_root = UESet.PPluginPath
        self.tools_path = self.script_root.joinpath("tools")
        self.importer_assets_path = self.script_root.joinpath("assets")
        self.paks_path = UESet.PPakFolder
        # Per-category import toggles from the UI.
        self.import_decals = UESet.bImportDecal
        self.import_blueprints = UESet.bImportBlueprint
        self.import_lights = UESet.bImportLights
        self.import_Mesh = UESet.bImportMesh
        self.import_materials = UESet.bImportMaterial
        self.import_sublevel = UESet.bImportSubLevels
        self.manual_lmres_mult = UESet.iManualLMResMult
        self.combine_umaps = False
        # Installed VALORANT build; compared against the exported.yo marker.
        self.val_version = get_valorant_version()
        self.dev_force_reexport = False
        self.export_path = UESet.PExportPath
        self.assets_path = self.export_path.joinpath("export")
        self.maps_path = self.export_path.joinpath("maps")
        # External extraction tools shipped with the plugin.
        self.umodel = self.script_root.joinpath("tools", "umodel.exe")
        self.debug = False
        self.cue4extractor = self.script_root.joinpath("tools", "cue4extractor.exe")
        self.log = self.export_path.joinpath("import.log")
        self.umap_list_path = self.importer_assets_path.joinpath("umaps.json")
        self.umap_list = read_json(self.umap_list_path)
        self.selected_map = Map(UESet.fMapName, self.maps_path, self.umap_list)
        # Parent material (shader) assets expected by the importer.
        # NOTE(review): "VALORANT_Decal" appears twice — likely redundant.
        self.shaders = [
            "VALORANT_Base",
            "VALORANT_Decal",
            "VALORANT_Emissive",
            "VALORANT_Emissive_Scroll",
            "VALORANT_Hologram",
            "VALORANT_Glass",
            "VALORANT_Blend",
            "VALORANT_Decal",
            "VALORANT_MRA_Splitter",
            "VALORANT_Normal_Fix",
            "VALORANT_Screen"
        ]
        create_folders(self)
## Map Definitions
class Map:
    """Per-map export layout: the on-disk folder tree holding a map's data."""
    def __init__(self, selected_map_name: str, maps_path: Path, all_umaps: list):
        self.name = selected_map_name
        # print(maps_path, self.name)
        self.folder_path = maps_path.joinpath(self.name)
        # Raises KeyError when the map name is unknown to umaps.json.
        self.umaps = all_umaps[self.name]
        # print(self)
        self.actors_path = self.folder_path.joinpath("actors")
        self.materials_path = self.folder_path.joinpath("materials")
        self.materials_ovr_path = self.folder_path.joinpath("materials_ovr")
        self.objects_path = self.folder_path.joinpath("objects")
        self.scenes_path = self.folder_path.joinpath("scenes")
        self.umaps_path = self.folder_path.joinpath("umaps")
        create_folders(self)
# Actor definitions
class actor_defs():
    """Lightweight view over a umap actor JSON entry.

    Exposes name/type/props/outer plus a parsed transform and any
    "SceneAttach*"-prefixed properties.
    """
    def __init__(self, Actor):
        self.data = Actor
        self.name = Actor["Name"] if has_key("Name", Actor) else None
        self.type = Actor["Type"] if has_key("Type", Actor) else None
        self.props = Actor["Properties"] if has_key("Properties", Actor) else {}
        # FIX: the original gated this on has_key("SceneAttach", self.props) —
        # a literal "SceneAttach" key that never exists — so scene_props was
        # always None. Gate on keys *starting with* "SceneAttach" instead.
        scene_attach = {k: v for k, v in self.props.items() if k.startswith("SceneAttach")}
        self.scene_props = scene_attach if scene_attach else None
        self.outer = Actor["Outer"] if has_key("Outer", Actor) else None
        # unreal.Transform when transform keys exist, otherwise False.
        self.transform = has_transform(self.props)
        self.debug = f'ActorName: {self.name} // ActorType: {self.type} // ActorOuter: {self.outer} // ActorTransform: {self.transform} // ActorProps: {self.props.keys()}'
|
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the OpenTimelineIO project
import re
import unreal
import opentimelineio as otio
from opentimelineio.opentime import RationalTime, TimeRange
from .util import METADATA_KEY_UE, METADATA_KEY_SUB_SEQ
class ShotSectionProxy(object):
    """Shot section wrapper.

    Pairs an unreal.MovieSceneCinematicShotSection with its parent
    LevelSequenceProxy and converts between UE tick/frame values and OTIO
    time ranges, effects, and metadata.
    """
    def __init__(self, shot_section, parent):
        """
        Args:
            shot_section (unreal.MovieSceneCinematicShotSection): Shot
                section.
            parent (LevelSequenceProxy): Proxy for section's parent
                level sequence.
        """
        self.section = shot_section
        self.parent = parent
    def get_start_frame_offset(self):
        """Calculate start frame offset for this section's
        sub-sequence.
        Returns:
            int: Offset in frames
        """
        # UE stores the offset in ticks; convert to whole frames.
        start_ticks_offset = self.section.parameters.start_frame_offset.value
        ticks_per_frame = self.parent.get_ticks_per_frame()
        return round(float(start_ticks_offset) / float(ticks_per_frame))
    def set_start_frame_offset(self, frames):
        """Set start frame offset for this section's sub-sequence.
        Args:
            frames (int): Start frame offset
        """
        ticks_per_frame = self.parent.get_ticks_per_frame()
        self.section.parameters.start_frame_offset.value = frames * ticks_per_frame
    def get_time_scale(self):
        """Return this section's time scale, which scales the playback
        range of its sub-sequence.
        Returns:
            float: Time scalar
        """
        time_scale = self.section.parameters.time_scale
        if hasattr(unreal, 'MovieSceneTimeWarpVariant'):  # UE 5.5+
            # In 5.5+ time_scale is a variant; extract the fixed play rate.
            time_scale = time_scale.to_fixed_play_rate()
        return time_scale
    def set_time_scale(self, time_scale):
        """Set this section's time scale, which scales the playback
        range of its sub-sequence.
        Args:
            time_scale (float): Time scale value
        """
        if hasattr(unreal, 'MovieSceneTimeWarpVariant'):  # UE 5.5+
            time_scale = unreal.MovieSceneTimeWarpVariant(time_scale)
        self.section.parameters.time_scale = time_scale
    def get_range_in_parent(self):
        """Calculate OTIO item range within its parent track.
        Returns:
            otio.opentime.TimeRange: Section range
        """
        frame_rate = self.parent.get_frame_rate()
        # NOTE: section.get_*_frame() methods always use floor rounding, so
        # we round from float-seconds here ourselves.
        parent_start_frame = round(self.section.get_start_frame_seconds() * frame_rate)
        parent_end_frame = round(self.section.get_end_frame_seconds() * frame_rate)
        parent_duration = parent_end_frame - parent_start_frame
        return TimeRange(
            start_time=RationalTime(parent_start_frame, frame_rate),
            duration=RationalTime(parent_duration, frame_rate),
        )
    def update_from_item_range(self, item):
        """Update section range within its parent track from an OTIO
        item.
        Args:
            item (otio.schema.Item): Item to update ranges from
        """
        range_in_parent = item.range_in_parent()
        if self.parent.global_start_time is not None:
            # Shift into the sequence's global timeline before converting.
            start_frames = (
                range_in_parent.start_time + self.parent.global_start_time
            ).to_frames()
            end_frames = start_frames + range_in_parent.duration.to_frames()
        else:
            start_frames = range_in_parent.start_time.to_frames()
            end_frames = range_in_parent.end_time_exclusive().to_frames()
        self.section.set_range(start_frames, end_frames)
    def update_effects(self, item):
        """Add effects needed to represent this section to an OTIO
        item.
        Args:
            item (otio.schema.Item): Item to add effects to
        """
        # Only a non-unity time scale needs representing (as a time warp).
        time_scale = self.get_time_scale()
        if time_scale != 1.0:
            item.effects.append(otio.schema.LinearTimeWarp(time_scalar=time_scale))
    def update_from_effects(self, item):
        """Update shot section properties from OTIO item effects.
        Args:
            item (otio.schema.Item): item to get effects from
        """
        # Multiple linear time warps compose multiplicatively.
        time_scale = 1.0
        for effect in item.effects:
            if isinstance(effect, otio.schema.LinearTimeWarp):
                time_scale *= effect.time_scalar
        self.set_time_scale(time_scale)
    def update_metadata(self, item):
        """Serialize shot section properties into OTIO item metadata.
        Args:
            item (otio.schema.Item): Item to set metadata on
        """
        timecode_source = self.section.get_editor_property("timecode_source")
        timecode_obj = timecode_source.get_editor_property("timecode")
        # Drop-frame timecode conventionally uses ";" before the frame count.
        timecode_str = "{h:02d}:{m:02d}:{s:02d}{sep}{f:02d}".format(
            sep=":" if not timecode_obj.drop_frame_format else ";",
            h=timecode_obj.hours,
            m=timecode_obj.minutes,
            s=timecode_obj.seconds,
            f=timecode_obj.frames,
        )
        # NOTE: start_frame_offset and time_scale are omitted here since they
        # will factor into a clip's source range and effects.
        metadata = {
            "timecode": timecode_str,
            "is_active": self.section.is_active(),
            "is_locked": self.section.is_locked(),
            "pre_roll_frames": self.section.get_pre_roll_frames(),
            "post_roll_frames": self.section.get_post_roll_frames(),
            "can_loop": self.section.parameters.can_loop,
            "end_frame_offset": self.section.parameters.end_frame_offset.value,
            "first_loop_start_frame_offset":
                self.section.parameters.first_loop_start_frame_offset.value,
            "hierarchical_bias": self.section.parameters.hierarchical_bias,
            METADATA_KEY_SUB_SEQ: self.section.get_sequence().get_path_name(),
            "network_mask": self.section.get_editor_property("network_mask"),
        }
        item.metadata[METADATA_KEY_UE] = metadata
    def update_from_metadata(self, item):
        """Update shot section properties from deserialized OTIO item
        metadata.

        Every key is optional; only the properties present in the item's
        UE metadata dict are applied.
        Args:
            item (otio.schema.Item): item to get metadata from
        """
        metadata = item.metadata.get(METADATA_KEY_UE)
        if not metadata:
            return
        timecode_source = self.section.get_editor_property("timecode_source")
        if "timecode" in metadata:
            # Mirror of the HH:MM:SS[:;]FF format written by update_metadata.
            timecode_match = re.match(
                r"^"
                r"(?P<h>\d{2}):"
                r"(?P<m>\d{2}):"
                r"(?P<s>\d{2})(?P<sep>[:;])"
                r"(?P<f>\d{2})"
                r"$",
                metadata["timecode"],
            )
            if timecode_match:
                timecode_obj = unreal.Timecode(
                    hours=int(timecode_match.group("h")),
                    minutes=int(timecode_match.group("m")),
                    seconds=int(timecode_match.group("s")),
                    frames=int(timecode_match.group("f")),
                    drop_frame_format=timecode_match.group("sep") == ";",
                )
                timecode_source.set_editor_property("timecode", timecode_obj)
        # NOTE: METADATA_KEY_SUB_SEQ is omitted here since it should have
        # already been applied by the calling code.
        # NOTE: start_frame_offset and time_scale are omitted here since they
        # will factor into a clip's source range and effects.
        if "is_active" in metadata:
            self.section.set_is_active(metadata["is_active"])
        if "is_locked" in metadata:
            self.section.set_is_locked(metadata["is_locked"])
        if "pre_roll_frames" in metadata:
            self.section.set_pre_roll_frames(metadata["pre_roll_frames"])
        if "post_roll_frames" in metadata:
            self.section.set_post_roll_frames(metadata["post_roll_frames"])
        if "can_loop" in metadata:
            self.section.parameters.can_loop = metadata["can_loop"]
        if "end_frame_offset" in metadata:
            self.section.parameters.end_frame_offset.value = metadata[
                "end_frame_offset"
            ]
        if "first_loop_start_frame_offset" in metadata:
            self.section.parameters.first_loop_start_frame_offset.value = metadata[
                "first_loop_start_frame_offset"
            ]
        if "hierarchical_bias" in metadata:
            self.section.parameters.hierarchical_bias = metadata["hierarchical_bias"]
        if "network_mask" in metadata:
            self.section.set_editor_property("network_mask", metadata["network_mask"])
|
import unreal
_PYTHON_INTERPRETER_PATH = unreal.get_interpreter_executable_path()
class Profiler():
    """Debug helpers for attaching an external debugger to the editor's Python."""
    @staticmethod
    def attach_debugger():
        """Start a debugpy listener on port 5678 and block until a client attaches.

        FIX: declared as @staticmethod — the original took no parameters, so
        calling it on an instance raised TypeError from the implicit `self`.
        """
        import debugpy
        debugpy.configure(python=_PYTHON_INTERPRETER_PATH)
        debugpy.listen(5678)
        print('Waiting for a debugger to attach...')
        debugpy.wait_for_client()
|
# Copyright (c) <2021> Side Effects Software Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The name of Side Effects Software may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY SIDE EFFECTS SOFTWARE "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL SIDE EFFECTS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" An example script that uses the API to instantiate an HDA and then set
the ramp points of a float ramp and a color ramp.
"""
import math
import unreal
_g_wrapper = None
def get_test_hda_path():
    """Object path of the example ramp HDA asset."""
    hda_path = '/project/.ramp_example_1_0'
    return hda_path
def get_test_hda():
    """Load and return the example ramp HDA object."""
    hda_path = get_test_hda_path()
    return unreal.load_object(None, hda_path)
def set_parameters(in_wrapper):
    """Configure the HDA's float and color ramps (bound to post-instantiation)."""
    print('set_parameters')
    # Unbind so this callback only runs once.
    in_wrapper.on_post_instantiation_delegate.remove_callable(set_parameters)
    # The height ramp is a float ramp: size it, then set each (position,
    # value) point individually as an example of per-point editing.
    height_points = ((0.0, 0.1), (0.2, 0.6), (0.4, 1.0), (0.6, 1.4), (0.8, 1.8), (1.0, 2.2))
    in_wrapper.set_ramp_parameter_num_points('heightramp', len(height_points))
    for point_idx, (position, value) in enumerate(height_points):
        in_wrapper.set_float_ramp_parameter_point_value('heightramp', point_idx, position, value)
    # The color ramp demonstrates setting all points at once via an array.
    in_wrapper.set_color_ramp_parameter_points('colorramp', (
        unreal.HoudiniPublicAPIColorRampPoint(position=0.0, value=unreal.LinearColor.GRAY),
        unreal.HoudiniPublicAPIColorRampPoint(position=0.5, value=unreal.LinearColor.GREEN),
        unreal.HoudiniPublicAPIColorRampPoint(position=1.0, value=unreal.LinearColor.RED),
    ))
def print_parameters(in_wrapper):
    """Dump the HDA's ramp points and all parameter tuples to the log.

    Bound to on_post_processing_delegate by run(); unbinds itself on entry
    so it only fires after the first cook.
    """
    print('print_parameters')
    in_wrapper.on_post_processing_delegate.remove_callable(print_parameters)
    # Print the ramp points directly
    print('heightramp: num points {0}:'.format(in_wrapper.get_ramp_parameter_num_points('heightramp')))
    heightramp_data = in_wrapper.get_float_ramp_parameter_points('heightramp')
    if not heightramp_data:
        print('\tNone')
    else:
        for idx, point_data in enumerate(heightramp_data):
            # (sic: "interpoloation" typo preserved in the output format)
            print('\t\t{0}: position={1:.6f}; value={2:.6f}; interpoloation={3}'.format(
                idx,
                point_data.position,
                point_data.value,
                point_data.interpolation
            ))
    print('colorramp: num points {0}:'.format(in_wrapper.get_ramp_parameter_num_points('colorramp')))
    colorramp_data = in_wrapper.get_color_ramp_parameter_points('colorramp')
    if not colorramp_data:
        print('\tNone')
    else:
        for idx, point_data in enumerate(colorramp_data):
            print('\t\t{0}: position={1:.6f}; value={2}; interpoloation={3}'.format(
                idx,
                point_data.position,
                point_data.value,
                point_data.interpolation
            ))
    # Print all parameter values
    param_tuples = in_wrapper.get_parameter_tuples()
    print('parameter tuples: {}'.format(len(param_tuples) if param_tuples else 0))
    if param_tuples:
        for param_tuple_name, param_tuple in param_tuples.items():
            print('parameter tuple name: {}'.format(param_tuple_name))
            print('\tbool_values: {}'.format(param_tuple.bool_values))
            print('\tfloat_values: {}'.format(param_tuple.float_values))
            print('\tint32_values: {}'.format(param_tuple.int32_values))
            print('\tstring_values: {}'.format(param_tuple.string_values))
            if not param_tuple.float_ramp_points:
                print('\tfloat_ramp_points: None')
            else:
                print('\tfloat_ramp_points:')
                for idx, point_data in enumerate(param_tuple.float_ramp_points):
                    print('\t\t{0}: position={1:.6f}; value={2:.6f}; interpoloation={3}'.format(
                        idx,
                        point_data.position,
                        point_data.value,
                        point_data.interpolation
                    ))
            if not param_tuple.color_ramp_points:
                print('\tcolor_ramp_points: None')
            else:
                print('\tcolor_ramp_points:')
                for idx, point_data in enumerate(param_tuple.color_ramp_points):
                    print('\t\t{0}: position={1:.6f}; value={2}; interpoloation={3}'.format(
                        idx,
                        point_data.position,
                        point_data.value,
                        point_data.interpolation
                    ))
def run():
    """Instantiate the example HDA and hook the parameter set/print callbacks."""
    global _g_wrapper
    # get the API singleton
    api = unreal.HoudiniPublicAPIBlueprintLib.get_api()
    # Instantiate with auto-cook enabled; the module-level reference keeps
    # the wrapper alive while its delegates fire.
    _g_wrapper = api.instantiate_asset(get_test_hda(), unreal.Transform())
    # Configure the float and color ramps before the first cook...
    _g_wrapper.on_post_instantiation_delegate.add_callable(set_parameters)
    # ...and dump the parameter state once cook/output creation finishes.
    _g_wrapper.on_post_processing_delegate.add_callable(print_parameters)
if __name__ == '__main__':
    run()
|
# Copyright (C) 2024 Louis Vottero [email protected] All rights reserved.
from __future__ import absolute_import
import os
import sys
import traceback
import string
import subprocess
import inspect
import collections
from functools import wraps
from .. import util
from .. import util_file
from .. import data
in_maya = False
in_unreal = False
def decorator_undo_chunk(function):
    """Fallback undo-chunk decorator used outside Maya.

    Runs the wrapped function and returns its value; on failure it returns
    None instead of raising (rebound to core.undo_chunk inside Maya).
    FIX: narrowed the original bare ``except: pass`` — which silently
    discarded every error including KeyboardInterrupt — to ``Exception``,
    and the traceback is now printed so failures are visible while the
    None-return fallback behavior is preserved.
    """
    @wraps(function)
    def wrapper(*args, **kwargs):
        return_value = None
        try:
            return_value = function(*args, **kwargs)
        except Exception:
            traceback.print_exc()
        return return_value
    return wrapper
# Host-application detection: rebind decorator_undo_chunk to Maya's real
# undo-chunk decorator when running inside Maya, and pull in unreal when
# running inside Unreal.
if util.is_in_maya():
    in_maya = True
    import maya.cmds as cmds
    from vtool.maya_lib import core
    # Replace the no-op fallback defined above with Maya's undo chunking.
    decorator_undo_chunk = core.undo_chunk
# NOTE(review): util.is_in_maya() is *called* but util.in_unreal is read as
# an attribute — confirm both spellings are correct in the util module.
if util.in_unreal:
    in_unreal = True
    import unreal
    from .. import unreal_lib
# NOTE(review): unreal_lib is imported again unconditionally here, making
# the relative import above redundant when inside Unreal.
from vtool import logger, unreal_lib
log = logger.get_logger(__name__)
log.info('Accessing')
def get_current_process_instance():
    """Return a Process pointed at the current Vetala process path."""
    current_path = util_file.get_current_vetala_process_path()
    instance = Process()
    instance.set_directory(current_path)
    return instance
def find_processes(directory=None, return_also_non_process_list=False, stop_at_one=False):
    """
    This will try to find the processes in the supplied directory.

    Args:
        directory(str): The directory to search for processes.
        return_also_non_process_list (bool): Also collect folders that are
            not processes but look interesting, and return both lists.
        stop_at_one (bool): Stop searching as soon as one result is found.

    Returns:
        list: The process names in the directory. When
            return_also_non_process_list is True, a pair of lists
            [processes, non_process_folders] is returned instead.
    """
    found = []
    found_non = []
    if not directory:
        if return_also_non_process_list:
            return [found, found_non]
        else:
            return found
    log.debug('Find Processes %s' % directory)
    dirs = []
    try:
        dirs = os.listdir(directory)
    except OSError:
        # Missing or unreadable directory: treat it as empty.
        pass
    for folder in dirs:
        if stop_at_one:
            # only check found not found_non, because function is find "processes"
            if found:
                break
            if found_non and return_also_non_process_list:
                break
        if folder.startswith('.'):
            continue
        full_path = util_file.join_path(directory, folder)
        if is_process(full_path):
            found.append(folder)
        elif return_also_non_process_list and is_interesting_folder(folder, directory):
            found_non.append(folder)
    if return_also_non_process_list:
        return [found, found_non]
    return found
def is_interesting_folder(folder_name, directory):
    """Return True for folders worth listing: anything without a dot in its
    name, or dotted names that are neither hidden nor actually files."""
    if folder_name.find('.') == -1:
        return True
    if folder_name.startswith('.'):
        return False
    full_path = util_file.join_path(directory, folder_name)
    return not util_file.is_file(full_path)
def is_process(directory):
    """A directory is a process when it contains a '.code' folder."""
    if not directory:
        return False
    code_path = util_file.join_path(directory, '.code')
    if not util_file.exists(code_path):
        return False
    return True
def is_process_enabled(directory):
    """True when the process enable marker file exists in directory."""
    marker = util_file.join_path(directory, Process.enable_filename)
    return bool(util_file.exists(marker))
def get_unused_process_name(directory=None, name=None):
    """
    This will try to find a process named process in the directory.

    It will increment the name to process1 and beyond until it finds a unique name.
    If no directory supplied, it will search the current working directory.

    Args:
        directory (str): Directory to search for processes.
        name (str): name to give the process.

    Returns:
        str: The unique process name.
    """
    if not directory:
        directory = util_file.get_cwd()
    processes = find_processes(directory)
    if name is None:
        name = Process.description
    new_name = name
    inc = 1
    while new_name in processes:
        new_name = util.increment_last_number(new_name)
        # Bug fix: the counter was never incremented, making this safety
        # valve dead code.
        inc += 1
        if inc > 1000:
            break
    return new_name
# Guard marking that a script run is already in progress, so nested
# run_script calls do not repeat the setup/teardown in the wrapper below.
__internal_script_running = None
def decorator_process_run_script(function):
    # decorator meant only to work with run_script, not to be used
    @wraps(function)
    def wrapper(self, script, hard_error=True, settings=None, return_status=False):
        self.current_script = script
        if in_maya:
            core.refresh()
        global __internal_script_running
        if __internal_script_running is None:
            # Outermost call: it owns setup/teardown ('reset' is detected
            # via locals() near the bottom).
            __internal_script_running = True
            reset = True
            util.start_temp_log()
            try:
                if in_maya:
                    # Wrap the whole run in a single undo chunk.
                    cmds.undoInfo(openChunk=True)
            except:
                print(traceback.format_exc())
                util.warning('Trouble prepping maya for script')
        put = None
        if self._data_override:
            put = self._data_override._put
        else:
            put = self._put
        # NOTE(review): clears self._put's cache even when the override's
        # put was selected above -- confirm this is intended.
        self._put._cache_feedback = {}
        reset_process_builtins(self, {'put': put})
        value = None
        if in_maya:
            # Disable parallel evaluation and the cache evaluator for the
            # duration of the run; the mode is restored afterwards.
            mode = cmds.evaluationManager(query=True, mode=True)[0]
            cmds.evaluationManager(mode='off')
            cmds.evaluator(name='cache', enable=0)
            try:
                if not core.is_batch():
                    if not cmds.ogs(q=True, pause=True):
                        # Pause viewport refresh for speed.
                        cmds.ogs(pause=True)
                value = function(self, script, hard_error, settings, return_status)
                if not core.is_batch():
                    if cmds.ogs(q=True, pause=True):
                        cmds.ogs(pause=True)
                util.global_tabs = 1
            except:
                print(traceback.format_exc())
                if not core.is_batch():
                    if cmds.ogs(q=True, pause=True):
                        cmds.ogs(pause=True)
            cmds.evaluationManager(mode=mode)
        else:
            value = function(self, script, hard_error, settings, return_status)
        if 'reset' in locals():
            # Only the outermost call clears the guard and closes the log.
            __internal_script_running = None
            put = None
            if self._data_override:
                put = self._data_override._put
            else:
                put = self._put
            reset_process_builtins(self, {'put': put})
            if in_maya:
                cmds.undoInfo(closeChunk=True)
            util.end_temp_log()
        if self.current_script:
            self.current_script = None
        return value
    return wrapper
class Process(object):
"""
This class has functions to work on individual processes in the Process Manager.
"""
description = 'process'
data_folder_name = '.data'
code_folder_name = '.code'
ramen_folder_name = '.ramen'
backup_folder_name = '.backup'
process_data_filename = 'manifest.data'
enable_filename = '.enable'
    def __init__(self, name=None):
        """
        Args:
            name (str): Name of the process, or None to point at the bare
                directory set with set_directory.
        """
        self.runtime_values = None
        self._put = None
        self._runtime_values = None
        self._data_override = None
        self.option_settings = None
        log.debug('Initialize process %s' % name)
        # Defaults to the cwd until set_directory is called.
        self.directory = util_file.get_cwd()
        self.process_name = name
        self._data_parent_folder = None
        self.external_code_paths = []
        self._reset()
        self._update_options = True
        self._option_result_function = None
        self._skip_children = None
        self._unreal_skeletal_mesh = None
    def _reset(self):
        """Clear cached parts, options, settings and runtime state."""
        self.parts = []
        self.option_values = {}
        self.option_settings = None
        self.settings = None
        self._control_inst = None
        self._data_override = None
        self._runtime_globals = {}
        self.reset_runtime()
        self._data_folder = ''
def _setup_options(self):
if not self.option_settings or self._update_options:
self._load_options()
def _load_options(self):
log.debug('Setup options')
options = util_file.SettingsFile()
self.option_settings = options
self.option_settings.set_directory(self._get_override_path(), 'options.json')
def _setup_settings(self):
if not self.settings:
log.debug('Setup process settings')
settings = util_file.SettingsFile()
self.settings = settings
self.settings.set_directory(self._get_override_path(), 'settings.json')
def _set_name(self, new_name):
new_name = new_name.strip()
self.process_name = new_name
def _handle_old_folders(self, path):
# here temporarily until old paths are out of use...
# could take a long time.
if util_file.is_dir(self.get_code_path()):
return
old_data_name = self.data_folder_name.replace('.', '_')
old_code_name = self.code_folder_name.replace('.', '_')
old_data_path = util_file.join_path(path, old_data_name)
old_code_path = util_file.join_path(path, old_code_name)
if util_file.is_dir(old_data_path):
util_file.rename(old_data_path, self.data_folder_name)
if util_file.is_dir(old_code_path):
util_file.rename(old_code_path, self.code_folder_name)
def _create_folder(self):
path = util_file.create_dir(self.process_name, self.directory)
if path and util_file.is_dir(path):
self._handle_old_folders(path)
util_file.create_dir(self.data_folder_name, path)
code_folder = util_file.create_dir(self.code_folder_name, path)
util_file.create_dir(self.ramen_folder_name, path)
util_file.create_dir(self.backup_folder_name, path)
manifest_folder = util_file.join_path(code_folder, 'manifest')
if not util_file.is_dir(manifest_folder):
self.create_code('manifest', 'script.manifest')
return path
def _create_sub_data_folder(self, data_name):
data_path = self.get_data_folder(data_name)
path = util_file.create_dir('.sub', data_path)
return path
def _get_path(self, name):
directory = util_file.join_path(self.get_path(), name)
return directory
def _get_override_path(self):
if not self._data_override:
return self.get_path()
if self._data_override:
return self._data_override.get_path()
    def _get_relative_process_path(self, relative_path, from_override=False):
        """Resolve a relative path ('..' climbs one process) against this
        process (or its data override).

        Args:
            relative_path (str): Path like '../face' relative to this process.
            from_override (bool): Resolve against the data override path.

        Returns:
            tuple: (process_name, process_directory), or (None, None) when
            no base path is available.
        """
        path = None
        if not from_override:
            path = self.get_path()
        if from_override:
            path = self._get_override_path()
        if not path:
            return None, None
        split_path = path.split('/')
        split_relative_path = relative_path.split('/')
        up_directory = 0
        new_sub_path = []
        new_path = []
        # Count '..' segments and keep the remaining relative segments.
        for sub_path in split_relative_path:
            if sub_path == '..':
                up_directory += 1
            if sub_path != '..':
                new_sub_path.append(sub_path)
        if up_directory:
            new_path = split_path[:-up_directory]
            new_path = new_path + new_sub_path
        if up_directory == 0:
            new_path = split_path + split_relative_path
        new_path_test = '/'.join(new_path)
        if not util_file.is_dir(new_path_test):
            # Fallback: look for the first relative segment among this
            # process' ancestors and resolve from there.
            temp_split_path = list(split_path)
            temp_split_path.reverse()
            found_path = []
            for inc in range(0, len(temp_split_path)):
                if temp_split_path[inc] == split_relative_path[0]:
                    found_path = temp_split_path[inc + 1:]
            found_path.reverse()
            new_path = found_path + split_relative_path
        process_name = '/'.join([new_path[-1]])
        process_path = '/'.join(new_path[:-1])
        util.show('Relative process name: %s and path: %s' % (process_name, process_path))
        return process_name, process_path
def _get_parent_process_path(self, from_override=False):
process_path = None
if not from_override:
process_path = self.get_path()
if from_override:
process_path = self._get_override_path()
dir_name = util_file.get_dirname(process_path)
process = Process()
process.set_directory(dir_name)
if process.is_process():
basename = util_file.get_basename(dir_name)
path = util_file.get_dirname(dir_name)
return basename, path
else:
return None, None
    def _get_code_file(self, name, basename=False):
        """
        Args:
            name (str): The name of a code folder in the process.
            basename (bool): Whether to return the full path or just the name of the file.

        Returns:
            str: The path to the code file with the specified name in the current process.
        """
        path = util_file.join_path(self.get_code_path(), name)
        code_name = name
        if not util_file.is_file(path):
            # Not a direct file: fall through and treat name as a code
            # folder containing the script file.
            path = ''
        if not path:
            path = util_file.join_path(self.get_code_path(), name)
            code_name = util_file.get_basename(path)
            if not code_name == 'manifest':
                # Regular code folders hold a python file named after them.
                code_name = code_name + '.py'
            if code_name == 'manifest':
                # The manifest folder holds a .data file instead.
                code_name = code_name + '.data'
            path = util_file.join_path(path, code_name)
        return_value = None
        if basename:
            return_value = code_name
        if not basename:
            return_value = path
        return return_value
def _get_enabled_children(self):
path = self.get_path()
found = []
disabled = []
for root, dirs, files in os.walk(path):
for folder in dirs:
if folder.startswith('.'):
continue
full_path = util_file.join_path(root, folder)
folder_name = os.path.relpath(full_path, path)
folder_name = util_file.fix_slashes(folder_name)
if folder_name.startswith('.') or folder_name.find('/.') > -1:
continue
parent_disabled = False
for dis_folder in disabled:
if folder_name.startswith(dis_folder):
parent_disabled = True
break
if parent_disabled:
continue
if not util_file.is_file_in_dir('.enable', full_path):
disabled.append(folder_name)
continue
found.append(folder_name)
found.reverse()
return found
def _get_control_inst(self):
if not self._control_inst:
self._control_inst = util_file.ControlNameFromSettingsFile(self.get_path())
    def _get_data_instance(self, name, sub_folder):
        """Build the data-type instance for the named data folder.

        sub_folder semantics: a string selects that sub folder, None keeps
        the folder's current sub folder, and False forces the default (top)
        folder.

        Returns:
            tuple: (instance, sub folder that was current before the call).
        """
        path = self.get_data_path()
        data_folder = data.DataFolder(name, path)
        current_sub_folder = sub_folder
        if sub_folder and sub_folder != False:
            # Remember the active sub folder so callers can restore it.
            current_sub_folder = data_folder.get_current_sub_folder()
            data_folder.set_sub_folder(sub_folder)
        if sub_folder == False:
            data_folder.set_sub_folder_to_default()
        instance = data_folder.get_folder_data_instance()
        return instance, current_sub_folder
def _refresh_process(self):
self._setup_options()
self._setup_settings()
self.runtime_values = {}
if self._control_inst:
self._control_inst.set_directory(self.get_path())
def _pass_module_globals(self, module):
"""
this was a test that might go further in the future.
the major problem was integer variables where not passable the first time.
"""
module_variable_dict = util_file.get_module_variables(module)
self._runtime_globals.update(module_variable_dict)
def _source_script(self, script):
util_file.delete_pyc(script)
put = None
if self._data_override:
put = self._data_override._put
else:
put = self._put
reset_process_builtins(self, {'put': put})
setup_process_builtins(self, {'put': put})
# util.show('Sourcing: %s' % script)
module = util_file.source_python_module(script)
status = None
init_passed = False
if module and not isinstance(module, str):
init_passed = True
if not module or isinstance(module, str):
status = module
init_passed = False
return module, init_passed, status
    def _format_option_value(self, value, option_name=None):
        """Normalize a raw stored option value into a plain python value.

        Handles [value, type] pairs, evaluates string values into
        lists/tuples/dicts where possible, and applies the optional
        option-result callback.

        Args:
            value: Raw stored option value.
            option_name (str): Name forwarded to the result callback.

        Returns:
            The formatted value.
        """
        new_value = value
        option_type = None
        if isinstance(value, list):
            try:
                option_type = value[1]
            except:
                pass
            value = value[0]
        if option_type == 'dictionary':
            new_value = value[0]
            if isinstance(new_value, dict) and isinstance(value[1], list):
                # Re-key the dict in the stored key order.
                new_value = collections.OrderedDict((key, new_value[key]) for key in value[1])
            if isinstance(new_value, list):
                new_value = new_value[0]
        if option_type == 'note':
            new_value = value[0]
        if not option_type == 'script':
            if util.is_str(value):
                eval_value = None
                try:
                    if value:
                        # NOTE(review): eval of option-file content -- only
                        # safe while option files are trusted input.
                        eval_value = eval(value)
                except:
                    pass
                if eval_value:
                    if isinstance(eval_value, list) or isinstance(eval_value, tuple) or isinstance(eval_value, dict):
                        new_value = eval_value
                        value = eval_value
            if util.is_str(value):
                new_value = util.convert_str_to_list(value)
                if len(new_value) == 0:
                    new_value = ''
                elif len(new_value) == 1:
                    new_value = new_value[0]
        if self._option_result_function:
            new_value = self._option_result_function(new_value, option_name)
        # converting tuple to allow easier manipulation
        if isinstance(new_value, tuple):
            new_value = util.convert_to_sequence(new_value)
        log.debug('Formatted value: %s' % str(new_value))
        return new_value
def set_directory(self, directory):
"""
Args:
directory (str): Directory path to the process that should be created or where an existing process lives.
"""
log.debug('Set process directory: %s' % directory)
self.directory = directory
self._reset()
def load(self, name):
"""
Loads the named process into the instance.
Args:
name (str): Name of a process found in the directory.
Returns:
None
"""
log.debug('Load process: %s' % name)
self._set_name(name)
self._reset()
def set_external_code_library(self, directory):
"""
Args:
directory (str,list): Directory or list of directories where code can be sourced from. This makes it more convenient when writing scripts in a process.
"""
directory = util.convert_to_sequence(directory)
self.external_code_paths = directory
def is_process(self):
"""
Returns:
bool: Check to see if the initialized process is valid.
"""
if not util_file.exists(self.get_code_path()):
path = self.get_path()
self._handle_old_folders(path)
if not util_file.exists(self.get_code_path()):
return False
return True
def set_enabled(self, bool_value):
path = self.get_path()
if bool_value:
util_file.create_file(self.enable_filename, path)
if not bool_value:
util_file.delete_file(self.enable_filename, path, show_warning=False)
def is_enabled(self):
path = self.get_path()
enable_path = util_file.join_path(path, self.enable_filename)
if util_file.exists(enable_path):
return True
return False
def has_sub_parts(self):
process_path = self.get_path()
if not process_path:
return False
files = util_file.get_folders(process_path)
if not files:
return False
for filename in files:
file_path = util_file.join_path(process_path, filename)
if is_process(file_path):
return True
return False
def get_non_process_parts(self):
process_path = self.get_path()
if not process_path:
return
folders = util_file.get_folders(process_path)
found = []
for folder in folders:
full_path = util_file.join_path(process_path, folder)
if not is_process(full_path):
continue
found.append(full_path)
return found
def get_path(self):
"""
Returns:
str: The full path to the process folder.
If the process hasn't been created yet, this will return the directory set in set_directory.
"""
if not self.directory:
return
if self.process_name:
return util_file.join_path(self.directory, self.process_name)
if not self.process_name:
return self.directory
def get_name(self):
"""
Returns:
str: The name of the process.
"""
if not self.process_name:
return util_file.get_basename(self.directory)
return self.process_name
def get_basename(self):
"""
Returns:
str: The name of the process. If no name return basename of directory.
"""
name = self.process_name
if not name:
name = self.directory
return util_file.get_basename(name)
    def get_relative_process(self, relative_path):
        """
        Args:
            relative_path (str): The path to a relative process.

        Returns:
            Process: An instance of a process at the relative path.

            If a name with no backslash is supplied, this will return any matching process parented directly under the current process.

            A relative path like, '../face' or '../../other_character' can be used.

            Every '..' signifies a folder above the current process.
        """
        process_name, process_directory = self._get_relative_process_path(relative_path)
        if not process_name and process_directory:
            # Resolved straight to a directory: split into name + parent.
            process_name = util_file.get_basename(process_directory)
            process_directory = util_file.get_dirname(process_directory)
        if not process_name and not process_directory:
            return
        """
        test_path = util_file.join_path(process_directory, process_name)
        if not util_file.is_dir(test_path):
            util.warning('%s is not a valid path.' % test_path)
        """
        process = Process(process_name)
        process.set_directory(process_directory)
        if self._data_override:
            # Mirror the relative lookup inside the override hierarchy.
            override_process_name, override_process_directory = self._get_relative_process_path(relative_path,
                                                                                                from_override=True)
            if override_process_name:
                override_process = Process(override_process_name)
                override_process.set_directory(override_process_directory)
                process.set_data_override(override_process)
        return process
def get_sub_process_count(self):
"""
Returns:
int: The number of sub processes under the current.
"""
found = self.get_sub_processes()
if found:
return len(found)
def get_sub_processes(self):
"""
Returns:
list: The process names found directly under the current process.
"""
process_path = self.get_path()
found = find_processes(process_path)
return found
def get_sub_process(self, part_name):
"""
Args:
part_name (str): The name of a child process.
Returns:
Process: A sub process if there is one that matches part_name.
"""
part_process = Process(part_name)
part_process.set_directory(self.get_path())
return part_process
def get_sub_process_by_index(self, index):
found = self.get_sub_processes()
if index < len(found):
sub_process = Process(found[index])
sub_process.set_directory(self.get_path())
return sub_process
def get_parent_process(self):
name, path = self._get_parent_process_path()
if not name:
return
parent_process = Process(name)
parent_process.set_directory(path)
if self._data_override:
name, path = self._get_parent_process_path(from_override=True)
if name:
override_process = Process(name)
override_process.set_directory(path)
parent_process.set_data_override(override_process)
util.show('Parent process: %s' % parent_process.get_path())
return parent_process
def get_empty_process(self, path=None):
process = Process()
if path:
process.set_directory(path)
return process
    def get_backup_path(self, directory=None):
        """Work out where backups for this process should go.

        With no directory supplied, the global backup_directory setting is
        used and the project-relative layout of this process is mirrored
        under it; otherwise the supplied directory is used directly.

        Returns:
            str: Backup folder path, or None when no directory is set.
        """
        if not self.directory:
            return None
        backup_directory = None
        if directory:
            backup_directory = directory
        if not directory:
            settings = util_file.get_vetala_settings_inst()
            backup = settings.get('backup_directory')
            if util_file.is_dir(backup):
                project = settings.get('project_directory')
                backup_directory = self.directory
                backup_settings = util_file.SettingsFile()
                backup_settings.set_directory(backup)
                # Encode the project path into a folder-safe name and record
                # the mapping in the backup settings.
                project_name = util_file.fix_slashes(project)
                project_name = project_name.replace('/', '_')
                project_name = project_name.replace(':', '_')
                backup_settings.set(project_name, project)
                backup_directory = util_file.create_dir(project_name, backup)
                process_path = self.get_path()
                common_path = util_file.remove_common_path_simple(project, process_path)
                if common_path:
                    backup_directory = util_file.create_dir(util_file.join_path(backup_directory, common_path))
        if not backup_directory:
            backup_directory = self.get_path()
        backup_path = util_file.join_path(backup_directory, self.backup_folder_name)
        return backup_path
def backup(self, comment='Backup', directory=None):
backup_path = self.get_backup_path(directory)
backup_path = util_file.create_dir('temp_process_backup', backup_path)
util.show('Backing up to custom directory: %s' % backup_path)
copy_process(self, backup_path)
version = util_file.VersionFile(backup_path)
version.save(comment)
util_file.delete_dir(backup_path)
# data ---
def is_data_folder(self, name, sub_folder=None):
"""
Args:
name (str): The name of a data folder in the process.
sub_folder (str): TODO: Fill description.
Returns:
bool: True if the supplied name string matches the name of a data folder in the current process.
"""
path = self.get_data_folder(name, sub_folder)
if not path:
return False
if util_file.is_dir(path):
return True
return False
def is_folder_data(self, fullpath):
data_file = util_file.join_path(fullpath, 'data.json')
if util_file.exists(data_file):
return True
else:
return False
def get_folder_data_instance(self, fullpath):
if not self.is_folder_data(fullpath):
return
path = util_file.get_dirname(fullpath)
name = util_file.get_basename(fullpath)
data_folder = data.DataFolder(name, path)
return data_folder.get_folder_data_instance()
def get_data_path(self, in_folder=True):
"""
Returns:
str: The path to the data folder for this process.
"""
data_path = None
if not self._data_override:
data_path = self._get_path(self.data_folder_name)
if self._data_override:
data_path = self._data_override._get_path(self.data_folder_name)
if data_path and self._data_parent_folder and in_folder:
data_path = util_file.join_path(data_path, self._data_parent_folder)
return data_path
def get_data_folder(self, name, sub_folder=None):
"""
Args:
name (str): The name of a data folder in the process.
sub_folder (str): TODO: Fill description.
Returns:
str: The path to the data folder with the same name if it exists.
"""
folder = None
if not sub_folder:
folder = util_file.join_path(self.get_data_path(), name)
if sub_folder:
folder = util_file.join_path(self.get_data_sub_path(name), sub_folder)
if util_file.is_dir(folder, case_sensitive=True):
return folder
def get_data_sub_path(self, name):
"""
Get that path where sub folders live
"""
path = self._create_sub_data_folder(name)
return path
def get_data_type(self, name):
"""
Args:
name (str): The name of a data folder in the process.
Returns:
str: The name of the data type of the data folder with the same name if it exists.
"""
data_folder = self.get_data_folder(name)
data_file = util_file.join_path(data_folder, 'data.json')
if not util_file.is_file(data_file):
return
data_folder = data.DataFolder(name, self.get_data_path())
data_type = data_folder.get_data_type()
return data_type
def get_data_file_or_folder(self, name, sub_folder_name=None):
"""
Data is either saved to a top file or a top folder. This is the main data saved under the data folder.
This file or folder is used for versioning.
This will return the file or folder that gets versioned.
"""
path = self.get_data_path()
data_folder = data.DataFolder(name, path)
instance = data_folder.get_folder_data_instance()
if not instance:
return
filepath = instance.get_file_direct(sub_folder_name)
return filepath
def get_data_version_count(self, data_name):
data_folder = self.get_data_file_or_folder(data_name)
version = util_file.VersionFile(data_folder)
return len(version.get_version_numbers())
def get_data_versions(self, data_name):
data_folder = self.get_data_file_or_folder(data_name)
version = util_file.VersionFile(data_folder)
return version.get_version_numbers()
def get_data_version_paths(self, data_name):
data_folder = self.get_data_file_or_folder(data_name)
version = util_file.VersionFile(data_folder)
paths = version.get_versions(return_version_numbers_also=False)
found = []
for path in paths:
path = version.get_version_path(path)
found.append(path)
return found
def get_data_version_path(self, data_name, version_number):
data_folder = self.get_data_file_or_folder(data_name)
version = util_file.VersionFile(data_folder)
path = version.get_version_path(version_number)
return path
def get_data_folders(self):
"""
Returns:
list: A list of data folder names found in the current process.
"""
directory = self.get_data_path()
folders = util_file.get_folders(directory)
if '.sub' in folders:
folders.remove('.sub')
return folders
def get_data_instance(self, name, sub_folder=None):
"""
Args:
name (str): The name of a data folder in the process.
sub_folder (str): TODO: Fill description.
Returns:
Process: An instance of the data type class for data with the specified name in the current process.
This gives access to the data functions like import_data found in the data type class.
"""
path = self.get_data_path()
named_path = self.get_data_folder(name)
data_file = util_file.join_path(named_path, 'data.json')
if not util_file.exists(data_file):
return
data_folder = data.DataFolder(name, path)
return data_folder.get_folder_data_instance()
    def create_data(self, name, data_type, sub_folder=None):
        """
        Args:
            name (str): The name of a data folder in the process.
            data_type (str): A string with the name of the data type of the data in the process.
            sub_folder (str): Optional sub folder to create under the data.

        Returns:
            str: The path to the new data folder.
        """
        orig_name = name
        path = self.get_data_path()
        test_path = util_file.join_path(path, name)
        if not sub_folder:
            # Avoid clobbering existing data by incrementing the name.
            test_path = util_file.inc_path_name(test_path)
        name = util_file.get_basename(test_path)
        data_folder = data.DataFolder(name, path)
        data_folder.set_data_type(data_type)
        return_path = data_folder.folder_path
        if sub_folder:
            sub_path = self.get_data_sub_path(orig_name)
            sub_folder_path = util_file.join_path(sub_path, sub_folder)
            if util_file.is_dir(sub_folder_path):
                # Sub folder already exists; reuse it.
                return sub_folder_path
            sub_folder_path = util_file.inc_path_name(sub_folder_path)
            return_path = util_file.create_dir(sub_folder_path)
        return return_path
def add_build_data(self):
build = self.get_data_folder('build')
if not build:
self.create_data('build', 'agnostic.platform')
def has_sub_folder(self, data_name, sub_folder_name):
"""
Has a sub folder of name.
"""
sub_folders = self.get_data_sub_folder_names(data_name)
if sub_folder_name in sub_folders:
return True
return False
def create_sub_folder(self, data_name, sub_folder_name):
data_type = self.get_data_type(data_name)
return self.create_data(data_name, data_type, sub_folder_name)
def get_data_sub_folder_names(self, data_name):
sub_folder = self.get_data_sub_path(data_name)
sub_folders = util_file.get_folders(sub_folder)
return sub_folders
def get_data_current_sub_folder(self, name):
"""
Get the currently set sub folder
"""
data_folder = data.DataFolder(name, self.get_data_path())
sub_folder = data_folder.get_current_sub_folder()
return sub_folder
def get_data_current_sub_folder_and_type(self, name):
"""
Get the currently set sub folder and its data type
"""
data_folder = data.DataFolder(name, self.get_data_path())
data_type = data_folder.get_data_type()
sub_folder = data_folder.get_sub_folder()
return sub_folder, data_type
# ---- data IO
def import_data(self, name, sub_folder=None):
"""
Convenience function which will run the import_data function found on the data_type instance for the specified data folder.
Args:
name (str): The name of a data folder in the process.
sub_folder (str): TODO: Fill description.
Returns:
None
"""
split_name = name.split('.')
if len(split_name) > 1:
name = split_name[0]
if sub_folder == None:
sub_folder = split_name[1]
data_folder_name = self.get_data_folder(name)
if not sub_folder:
util.show('Import data in: %s' % name)
if sub_folder:
util.show('Import data %s in sub folder %s' % (name, sub_folder))
if not util_file.is_dir(data_folder_name):
util.warning('%s data folder does not exist in %s' % (name, self.get_data_path()))
return
instance, original_sub_folder = self._get_data_instance(name, sub_folder)
if hasattr(instance, 'import_data'):
value = instance.import_data()
instance.set_sub_folder(original_sub_folder)
return value
else:
util.warning('Could not import data %s in process %s.'
' It has no import function.' % (name, self.process_name))
def open_data(self, name, sub_folder=None):
data_folder_name = self.get_data_folder(name)
util.show('Open data in: %s' % data_folder_name)
if not util_file.is_dir(data_folder_name):
util.show('%s data does not exist in %s' % (name, self.get_data_path()))
return
instance, original_sub_folder = self._get_data_instance(name, sub_folder)
return_value = None
if hasattr(instance, 'import_data') and not hasattr(instance, 'open'):
return_value = instance.import_data()
instance.set_sub_folder(original_sub_folder)
return return_value
if hasattr(instance, 'open'):
return_value = instance.open()
instance.set_sub_folder(original_sub_folder)
return return_value
else:
util.warning('Could not open data %s in process %s. It has no open function.' % (name, self.process_name))
def reference_data(self, name, sub_folder=None):
data_folder_name = self.get_data_folder(name)
util.show('Reference data in: %s' % data_folder_name)
if not util_file.is_dir(data_folder_name):
util.show('%s data does not exist in %s' % (name, self.get_data_path()))
return
instance, original_sub_folder = self._get_data_instance(name, sub_folder)
return_value = None
if hasattr(instance, 'maya_reference_data'):
return_value = instance.maya_reference_data()
else:
util.warning('Could not reference data %s in process %s. '
'has no reference function.' % (name, self.process_name))
instance.set_sub_folder(original_sub_folder)
return return_value
def save_data(self, name, comment='', sub_folder=None):
"""
Convenience function that tries to run the save function found on the data_type instance for the specified data folder. Not all data type instances have a save function.
Args:
name (str): The name of a data folder in the process.
comment (str): TODO: Fill description.
sub_folder (str): TODO: Fill description.
Returns:
None
"""
data_folder_name = self.get_data_folder(name)
if not util_file.is_dir(data_folder_name):
util.show('%s data does not exist in %s' % (name, self.get_data_path()))
util.show('Could not save')
return
instance, original_sub_folder = self._get_data_instance(name, sub_folder)
if not comment:
comment = 'Saved through process class with no comment.'
if hasattr(instance, 'save'):
saved = instance.save(comment)
instance.set_sub_folder(original_sub_folder)
if saved:
return True
return False
    def export_data(self, name, comment='', sub_folder=None, list_to_export=None):
        """
        Convenience function that tries to run the export function found on the data_type instance for the specified data folder. Not all data type instances have a save function.

        Args:
            name (str): The name of a data folder in the process.
            comment (str): Version comment for the export.
            sub_folder (str): Optional sub folder to export to.
            list_to_export (list): Optional selection passed to exporters
                whose export_data accepts a 'selection' argument.

        Returns:
            None
        """
        if list_to_export is None:
            list_to_export = []
        data_folder_name = self.get_data_folder(name)
        if not util_file.is_dir(data_folder_name):
            util.show('%s data does not exist in %s' % (name, self.get_data_path()))
            util.show('Could not export')
            return
        instance, original_sub_folder = self._get_data_instance(name, sub_folder)
        if not comment:
            comment = 'Exported through process class with no comment.'
        if hasattr(instance, 'export_data'):
            # Only pass the selection through when the exporter supports it.
            selection_pass = False
            # NOTE(review): assumes util.python_version compares > 3 for any
            # python 3.x (i.e. it is a float like 3.11) -- confirm.
            if util.python_version > 3:
                signature = inspect.signature(instance.export_data)
                if 'selection' in signature.parameters and list_to_export:
                    selection_pass = True
            if util.python_version < 3:
                arg_spec = inspect.getargspec(instance.export_data)
                if 'selection' in arg_spec.args and list_to_export:
                    selection_pass = True
            if selection_pass:
                exported = instance.export_data(comment, selection=list_to_export)
            else:
                exported = instance.export_data(comment)
        # need to get all the data types returning true or false on export
        instance.set_sub_folder(original_sub_folder)
        # if exported:
        #     return True
        # return False
# ---- data utils
def set_data_parent_folder(self, folder_name):
"""
Within the data path, sets folder_name as the parent folder to the data
"""
self._data_parent_folder = folder_name
def remove_data_parent_folder(self):
self._data_parent_folder = None
def rename_data(self, old_name, new_name):
"""
Renames the data folder specified with old_name to the new_name.
Args:
old_name (str): The current name of the data.
new_name (str): The new name for the data.
Returns:
str: The new path to the data if rename was successful.
"""
data_folder = data.DataFolder(old_name, self.get_data_path())
return data_folder.rename(new_name)
def delete_data(self, name, sub_folder=None):
    """
    Deletes the specified data folder from the file system.

    Args:
        name (str): The name of a data folder in the process.
        sub_folder (str): Optional sub folder of the data to delete.

    Returns:
        None
    """
    folder = data.DataFolder(name, self.get_data_path())
    folder.set_sub_folder(sub_folder)
    folder.delete()
def copy_sub_folder_to_data(self, sub_folder_name, data_name):
    """Copy the file of a data sub folder over the data's main file."""
    if not self.has_sub_folder(data_name, sub_folder_name):
        util.warning('Data %s has no sub folder: %s to copy from.' % (data_name, sub_folder_name))
        return
    source = self.get_data_file_or_folder(data_name, sub_folder_name)
    target = self.get_data_file_or_folder(data_name)
    copy(source, target)
def copy_data_to_sub_folder(self, data_name, sub_folder_name):
    """Copy the data's main file into the named data sub folder."""
    if not self.has_sub_folder(data_name, sub_folder_name):
        util.warning('Data %s has no sub folder: %s to copy to.' % (data_name, sub_folder_name))
        return
    source = self.get_data_file_or_folder(data_name)
    target = self.get_data_file_or_folder(data_name, sub_folder_name)
    copy(source, target)
def remove_data_versions(self, name, sub_folder=None, keep=1):
    """
    Delete stored versions of the named data, keeping the newest `keep`.

    Args:
        name (str): The name of a data folder in the process.
        sub_folder (str): Optional sub folder of the data.
        keep (int): How many versions to keep.
    """
    folder = self.get_data_folder(name, sub_folder)
    util_file.delete_versions(folder, keep)
def cache_data_type_read(self, name):
    """Prime the read cache for the named data folder's data.json file."""
    data_folder = data.DataFolder(name, self.get_data_path())
    data_type = util_file.join_path(data_folder.folder_path, 'data.json')
    util_file.ReadCache.cache_read_data(data_type)
def delete_cache_data_type_read(self, name):
    """Drop the cached read data for the named data folder's data.json file."""
    data_folder = data.DataFolder(name, self.get_data_path())
    data_type = util_file.join_path(data_folder.folder_path, 'data.json')
    util_file.ReadCache.remove_read_data(data_type)
# code ---
def is_code_folder(self, name):
    """
    Args:
        name (str): The name of a code folder in the process.

    Returns:
        bool: If the supplied name string matches the name of a code folder in the current process.
    """
    path = self.get_code_folder(name)
    # A code folder must resolve, exist on disk, and carry a data.json file.
    return bool(path and util_file.is_dir(path) and util_file.is_file_in_dir('data.json', path))
def get_code_path(self):
    """
    Returns:
        str: The path to the code folder for this process.
    """
    return self._get_path(self.code_folder_name)
def get_code_folder(self, name):
    """
    Args:
        name (str): The name of a code folder in the process.

    Returns:
        str: A path to the code folder with the supplied name string if it exists.
    """
    base = name
    # Strip known extensions so a file name can also be resolved.
    if base.endswith('.py'):
        base = base[:-3]
    if base.endswith('.data'):
        base = base[:-5]
    folder = util_file.join_path(self.get_code_path(), base)
    if util_file.is_dir(folder):
        return folder
    return None
def get_code_folders(self, code_name=None):
    """
    Args:
        code_name (str): Optional sub folder to start the recursive search from.

    Returns:
        list: A list of code folder names found in the current process.
    """
    directory = self.get_code_path()
    if code_name:
        directory = util_file.join_path(directory, code_name)
    return util_file.get_code_folders(directory, recursive=True)
def get_top_level_code_folders(self):
    """Return code folders that are at most one level deep."""
    return [folder for folder in self.get_code_folders() if folder.count('/') <= 1]
def get_code_names(self, include_scripts=False):
    """
    Return the names of code entries for this process.

    Starts with manifest scripts that have a matching code folder, always
    prefixed with 'manifest'. When include_scripts is True, also append loose
    files under the code path that are not data folders (nor inside one),
    skipping version/cache artifacts.

    Args:
        include_scripts (bool): Whether to include loose on-disk files that
            are not part of the manifest.

    Returns:
        list: Code names.
    """
    codes, states = self.get_manifest()
    code_names = []
    if codes:
        for code in codes:
            code_name = code.split('.')
            if not self.is_code_folder(code_name[0]):
                continue
            if len(code_name) > 1 and code_name[1] == 'py':
                code_names.append(code_name[0])
    code_names.insert(0, 'manifest')
    if include_scripts:
        result = util_file.get_all_rel_paths(self.get_code_path())
        # Skip version folders, byte-code caches and compiled files.
        filter_names = ['.version', '__pycache__', '.pyc']
        code_path = self.get_code_path()
        # Cache per-folder is_folder_data results to avoid re-checking parents.
        visited = {code_path:False}
        for path in result:
            found = False
            for name in filter_names:
                if path.find(name) > -1:
                    found = True
                    break
            if found:
                continue
            full_path = util_file.join_path(code_path, path)
            if self.is_folder_data(full_path):
                continue
            parent_path = util_file.get_dirname(full_path)
            parent_is_data = False
            if parent_path in visited:
                parent_is_data = visited[parent_path]
            else:
                parent_is_data = self.is_folder_data(parent_path)
                visited[parent_path] = parent_is_data
            if parent_is_data:
                continue
            code_names.append(util_file.fix_slashes(path))
    return code_names
def get_code_children(self, code_name):
    """Return manifest scripts that are direct children of code_name."""
    base = util_file.remove_extension(code_name)
    prefix = base + '/'
    scripts, _states = self.get_manifest()
    children = []
    for script in scripts:
        if '/' not in script:
            continue
        if script.startswith(prefix):
            remainder = script[len(prefix):]
            # Only direct children: no further slashes past the prefix.
            if '/' not in remainder:
                children.append(script)
    return children
def get_code_type(self, name):
    """
    Args:
        name (str): The name of a code folder in the process.

    Returns:
        str: The code type name of the code folder with the supplied name if the code folder exists. Otherwise return None. Right now only python code type is used by the Process Manager.
    """
    # this was added because data folder is sometimes faulty
    path = util_file.join_path(self.get_code_path(), name)
    python_file = util_file.join_path(path, util_file.get_basename(name) + '.py')
    data_type = None
    if util_file.is_file(python_file):
        data_type = 'script.python'
        return data_type
    fullpath = util_file.join_path(self.get_code_path(), name)
    # Fall back to asking the data folder for its declared type.
    if self.is_folder_data(fullpath):
        data_folder = data.DataFolder(name, self.get_code_path())
        data_type = data_folder.get_data_type()
    return data_type
def get_code_files(self, basename=False, fast_with_less_checking=False):
    """
    Args:
        basename (bool): Whether to return the full path or just the name of the file.
        fast_with_less_checking (bool): Skip the slower data-folder lookup for
            folders without a directly matching .py file.

    Returns:
        list: The path to the code files found in the code folder for the current process.
        If basename is True, only return the file names without the path.
    """
    directory = self.get_code_path()
    files = []
    folders = self.get_code_folders()
    if not folders:
        return
    for folder in folders:
        path = util_file.join_path(directory, folder)
        code_file = util_file.join_path(path, (util_file.get_basename(folder) + '.py'))
        # NOTE(review): direct .py hits are appended as full paths even when
        # basename is True - confirm whether that asymmetry is intended.
        if util_file.is_file(code_file):
            files.append(code_file)
            continue
        if fast_with_less_checking:
            continue
        data_folder = data.DataFolder(folder, directory)
        data_instance = data_folder.get_folder_data_instance()
        if data_instance:
            file_path = data_instance.get_file()
            if not basename:
                files.append(file_path)
            if basename:
                # Store the relative folder path rather than the file name.
                rel_file_path = util_file.remove_common_path_simple(directory, file_path)
                split_path = rel_file_path.split('/')
                code_path = '/'.join(split_path[:-1])
                files.append(code_path)
    return files
def get_code_file(self, name, basename=False):
    """
    Args:
        name (str): The name of a code folder in the process.
        basename (bool): Whether to return the full path or just the name of the file.

    Returns:
        str: The path to the code file with the specified name in the current process.
    """
    path = self._get_code_file(name, basename)
    if not util_file.exists(path):
        # Fall back to searching all code files for a matching basename.
        first_matching = self.get_first_matching_code(name)
        if first_matching:
            return first_matching
        return
    return path
def get_first_matching_code(self, name):
    """Return the first code file whose basename matches name, or None."""
    short_name = util_file.remove_extension(util_file.get_basename(name))
    suffix = '%s.py' % short_name
    codes = self.get_code_files(basename=False, fast_with_less_checking=True)
    if not codes:
        return None
    for code in codes:
        if code.endswith(suffix):
            return code
    return None
def get_code_name_from_path(self, code_path):
    """
    Convert a full path under the code folder into a manifest-style code name.

    Args:
        code_path (str): Path that contains the code folder name.

    Returns:
        str: The relative code name, or None when the path does not split
        cleanly on the code folder name.
    """
    split_path = code_path.split('%s/' % self.code_folder_name)
    if len(split_path) == 2:
        parts = split_path[1].split('/')
        if len(parts) > 2:
            last_part = util_file.remove_extension(parts[-1])
            # folder/folder.py layout: drop the duplicated file component.
            if last_part == parts[-2]:
                # NOTE(review): this inner len check is redundant - it is
                # already guaranteed by the enclosing branch.
                if len(parts) > 2:
                    return '/'.join(parts[:-1])
            if last_part != parts[-2]:
                return '/'.join(parts)
        if len(parts) == 2:
            return parts[0]
def get_code_module(self, name):
    """
    Source the named code file as a module.

    Returns:
        module: The module instance
        bool: If the module sourced properly or not
        str: The status of the source. Error messages etc.
    """
    script = self.get_code_file(name)
    module, init_passed, status = self._source_script(script)
    return module, init_passed, status
def create_code(self, name, data_type='script.python', inc_name=False, import_data=None):
    """
    Create a new code folder with the specified name and data_type.

    Args:
        name (str): The name of the code to create.
        data_type (str): Usually 'script.python'.
        inc_name (bool): Whether to increment the name.
        import_data (str): The name of data in the process.
            Lines will be added to the code file to import the data.

    Returns:
        str: Filename
    """
    path = self.get_code_path()
    if not path:
        return
    if inc_name:
        # Find a non-clashing name by incrementing the target path.
        test_path = util_file.join_path(path, name)
        if util_file.exists(test_path):
            test_path = util_file.inc_path_name(test_path)
            name = util_file.get_basename(test_path)
            path = util_file.get_dirname(test_path)
    log.info('Create code %s at path %s' % (name, path))
    data_folder = data.DataFolder(name, path)
    data_folder.set_data_type(data_type)
    data_instance = data_folder.get_folder_data_instance()
    if not data_instance:
        return
    if name == 'manifest':
        # The manifest gets no template lines and is not added to itself.
        data_instance.create()
        filename = data_instance.get_file()
        return filename
    if import_data:
        data_instance.set_lines(['', 'def main():', "    process.import_data('%s')" % import_data])
    if not import_data:
        data_instance.set_lines(['', 'def main():', '    return'])
    data_instance.create()
    filename = data_instance.get_file()
    if not self.is_in_manifest('%s.py' % name):
        self.set_manifest(['%s.py' % name], append=True)
    return filename
def move_code(self, old_name, new_name):
    """
    Move/rename a code folder, incrementing the target name when it already
    exists, and rename the inner .py file to match the new basename.

    Args:
        old_name (str): Current relative code folder name.
        new_name (str): Desired relative code folder name.

    Returns:
        str: The final relative name used for the code folder.
    """
    code_path = self.get_code_path()
    old_path = util_file.join_path(code_path, old_name)
    new_path = util_file.join_path(code_path, new_name)
    basename = util_file.get_basename(new_name)
    dirname = util_file.get_dirname(new_name)
    test_path = new_path
    if util_file.is_dir(test_path):
        last_number = 1
        # Keep bumping the trailing number until a free folder name is found.
        while util_file.is_dir(test_path):
            basename = util.replace_last_number(basename, last_number)
            new_name = basename
            if dirname:
                new_name = util_file.join_path(dirname, basename)
            test_path = util_file.join_path(code_path, new_name)
            last_number += 1
    util_file.move(old_path, test_path)
    file_name = new_name
    old_basename = util_file.get_basename(old_name)
    new_basename = util_file.get_basename(new_name)
    # Keep the inner script file named after its folder.
    update_path = util_file.join_path(test_path, old_basename + '.py')
    util_file.rename(update_path, new_basename + '.py')
    return file_name
def rename_code(self, old_name, new_name):
    """
    Renames the code folder specified with old_name to the new_name.

    Args:
        old_name (str): The current name of the code.
        new_name (str): The new name for the code.

    Returns:
        str: The new name with .py appended if rename was successful.
    """
    new_name = util.clean_file_string(new_name)
    new_name = new_name.replace('.', '_')
    old_len = old_name.count('/')
    new_len = new_name.count('/')
    # Only same-depth renames are supported; moving is a separate operation.
    if old_len != new_len:
        util.warning('Rename works on code folders in the same folder. Try move instead.')
        return
    sub_new_name = util_file.remove_common_path(old_name, new_name)
    code_folder = data.DataFolder(old_name, self.get_code_path())
    code_folder.rename(sub_new_name)
    name = new_name + '.py'
    return name
"""
def duplicate_code(self, name):
source_path = util_file.join_path(self.get_code_path(), name)
destination_path = util_file.join_path(self.get_code_path(), '%s_copy' % name)
util_file.copy_dir(source_path, destination_path)
return destination_path
"""
def delete_code(self, name):
    """
    Deletes the specified code folder from the file system.

    Args:
        name (str): The name of a code folder in the process.

    Returns:
        None
    """
    util_file.delete_dir(name, self.get_code_path())
def remove_code_versions(self, code_name, keep=1):
    """
    Delete stored versions of the named code, keeping the newest `keep`.

    Args:
        code_name (str): The name of a code folder in the process.
        keep (int): How many versions to keep.
    """
    folder = self.get_code_folder(code_name)
    util_file.delete_versions(folder, keep)
#--- Ramen
def get_ramen_path(self):
    """
    Returns:
        str: The path to the ramen folder for this process.
    """
    return self._get_path(self.ramen_folder_name)
def get_ramen_graphs(self):
    """Return the graph folder names under the ramen 'graphs' directory."""
    graphs_path = util_file.join_path(self.get_ramen_path(), 'graphs')
    return util_file.get_folders(graphs_path, recursive=False, filter_text=None, skip_dot_prefix=False)
def get_ramen_graph(self, graph_name):
    """Return the path to the named ramen graph folder if it exists."""
    graph_path = util_file.join_path(self.get_ramen_path(), 'graphs/%s' % graph_name)
    if util_file.is_dir(graph_path):
        return graph_path
    return None
def get_ramen_file(self, graph_name):
    """Return the path to ramen.json inside the named graph folder, if present."""
    graph_path = self.get_ramen_graph(graph_name)
    candidate = util_file.join_path(graph_path, 'ramen.json')
    return candidate if util_file.is_file(candidate) else None
def create_ramen_file(self, graph_name):
    """Create (if needed) and return the ramen.json file for a graph."""
    ramen_file = self.get_ramen_file(graph_name)
    if not ramen_file:
        ramen_path = self.get_ramen_path()
        ramen_graph_path = util_file.join_path(ramen_path, 'graphs/%s' % graph_name)
        ramen_graph_path = util_file.create_dir(ramen_graph_path)
        if ramen_graph_path:
            ramen_file = util_file.create_file('ramen.json', ramen_graph_path)
    return ramen_file
#--- settings
def get_setting_names(self):
    """Return [settings name, options name] derived from their file paths."""
    option_name = util_file.get_basename_no_extension(self.get_option_file())
    settings_name = util_file.get_basename_no_extension(self.get_settings_file())
    return [settings_name, option_name]
def get_setting_file(self, name):
    """Map a settings name ('options' or 'settings') to its file path."""
    dispatch = {'options': self.get_option_file,
                'settings': self.get_settings_file}
    getter = dispatch.get(name)
    if getter:
        return getter()
    return None
def get_settings_file(self):
    """Return the path to the settings file, initializing settings first."""
    self._setup_settings()
    return self.settings.get_file()
def get_settings_inst(self):
    """Return the settings instance, initializing it first."""
    self._setup_settings()
    return self.settings
def set_setting(self, name, value):
    """Set a value in the process settings."""
    self._setup_settings()
    self.settings.set(name, value)
def get_setting(self, name):
    """Get a value from the process settings."""
    self._setup_settings()
    return self.settings.get(name)
def get_control(self, description, side):
    """Return the control name for a description/side via the control instance."""
    self._get_control_inst()
    return self._control_inst.get_name(description, side)
# --- options
def has_options(self):
    """Return whether any option settings exist for this process."""
    self._setup_options()
    return self.option_settings.has_settings()
def add_option(self, name, value, group=None, option_type=None):
    """
    Add an option setting, optionally namespaced by group and tagged with a type.

    Args:
        name (str): Option name.
        value: Option value; wrapped as [value, option_type] for typed options.
        group (str): Optional group prefix for the stored key.
        option_type (str): One of 'script', 'ui', 'dictionary',
            'reference.group', 'note', or None for a plain value.
    """
    self._setup_options()
    if group:
        name = '%s.%s' % (group, name)
    show_value = None
    # Typed options are stored as [value, type]; note values are stringified.
    if option_type in ('script', 'ui', 'dictionary', 'reference.group'):
        show_value = value
        value = [value, option_type]
    elif option_type == 'note':
        value = str(value)
        show_value = value
        value = [value, 'note']
    has_option = self.option_settings.has_setting(name)
    if not has_option and show_value is not None:
        util.show('Creating option: %s with a value of: %s' % (name, show_value))
    self.option_settings.set(name, value)
def set_option(self, name, value, group=None):
    """Set an option value, prefixing the name with group when given."""
    self._setup_options()
    full_name = '%s.%s' % (group, name) if group else name
    self.option_settings.set(full_name, value)
def get_unformatted_option(self, name, group=None):
    """Get an option's raw stored value, without formatting."""
    self._setup_options()
    full_name = '%s.%s' % (group, name) if group else name
    return self.option_settings.get(full_name)
def set_option_index(self, index, name, group=None):
    """
    Move an option to the given position in the option order and write it out.

    Args:
        index (int): Target position in settings_order.
        name (str): Option name.
        group (str): Optional group prefix.
    """
    self._setup_options()
    if group:
        name = '%s.%s' % (group, name)
    if not group:
        name = '%s' % name
    # Only remove when the name is already present, then re-insert at index.
    remove = False
    for thing in self.option_settings.settings_order:
        if thing == name:
            remove = True
    if remove:
        self.option_settings.settings_order.remove(name)
    self.option_settings.settings_order.insert(index, name)
    self.option_settings._write()
def get_option(self, name, group=None):
    """
    Get an option by name and group

    Falls back to searching all groups for a matching name when the direct
    lookup fails, then formats the stored value before returning it.

    Args:
        name (str): Option name.
        group (str): Optional group the option lives in.

    Returns:
        The formatted option value, or None when nothing matches.
    """
    self._setup_options()
    value = self.get_unformatted_option(name, group)
    if value is None:
        # Direct lookup failed; search every group for the name.
        match_value = self.get_option_match_and_group(name, return_first=True)
        if not match_value:
            return None
        value = match_value[0]
        match_group = match_value[1]
        if value and group:
            # NOTE(review): '% s' in this message has a stray space.
            if not match_group.endswith(group):
                util.warning('Access option: %s, but it was not in group: % s' % (name, group))
        group = match_group
    if value is None:
        util.warning('Trouble accessing option %s.' % name)
        if self.has_option(name, group):
            if group:
                util.warning('Could not find option: %s in group: %s' % (name, group))
            else:
                util.warning('Could not find option: %s' % name)
    else:
        value = self._format_option_value(value, name)
    log.info('Get option: name: %s group: %s with value: %s' % (name, group, value))
    util.show('< Option: %s, Group: %s, value: %s' % (name, group, value))
    return value
def get_option_match_and_group(self, name, return_first=True):
    """
    Try to find a matching option in all the options
    Return the matching value and group

    Args:
        name (str): Option name to search for (last part of the dotted key).
        return_first (bool): Return on the first match.

    Returns:
        tuple: (formatted value, group) for the first match when return_first
        is True. Otherwise a dict, or None when nothing matched.
    """
    self._setup_options()
    option_dict = self.option_settings.settings_dict
    found = {}
    for key in option_dict:
        split_key = key.split('.')
        group = '.'.join(split_key[:-1])
        if split_key[-1] == name:
            value = None
            if return_first:
                value = self._format_option_value(option_dict[key], key)
                return value, group
            # NOTE(review): when return_first is False, value stays None and
            # found is keyed only by name (keeps last match) - looks unfinished.
            found[name] = [value, group]
    if not found:
        found = None
    return found
def get_option_match(self, name, return_first=True):
    """
    Try to find a matching option in all the options

    Args:
        name (str): Option name to search for (last part of the dotted key).
        return_first (bool): Return the first formatted value found.

    Returns:
        The formatted value of the first match when return_first is True,
        otherwise a dict, or None when nothing matched.
    """
    self._setup_options()
    option_dict = self.option_settings.settings_dict
    found = {}
    for key in option_dict:
        split_key = key.split('.')
        if split_key[-1] == name:
            value = None
            if return_first:
                value = self._format_option_value(option_dict[key], key)
                return value
            # NOTE(review): when return_first is False the stored value is
            # always None - looks unfinished.
            found[name] = value
    if not found:
        found = None
    return found
def set_option_result_function(self, function_inst):
    """
    Function to run on option results
    Function needs to accept two arguments (value, str)
    Function needs to return value
    """
    self._option_result_function = function_inst
def has_option(self, name, group=None):
    """Return whether an option matching name (optionally in group) exists."""
    self._setup_options()
    full_name = '%s.%s' % (group, name) if group else name
    return self.option_settings.has_setting_match(full_name)
def get_options(self):
    """Return all option settings, or an empty list when none are loaded."""
    self._setup_options()
    if self.option_settings:
        return self.option_settings.get_settings()
    return []
def get_option_name_at_index(self, index):
    """
    Return the option name stored at the given index in the option order.

    Args:
        index (int): Position in settings_order.

    Returns:
        str: The option name, or None when index is out of range.
    """
    count = len(self.option_settings.settings_order)
    if index >= count:
        # Bug fix: previously this only warned and then still indexed the
        # list, raising IndexError anyway. Return None so the warning is
        # the actual behavior.
        util.warning('Option index out of range')
        return None
    return self.option_settings.settings_order[index]
def get_option_file(self):
    """Return the path to the option settings file."""
    self._setup_options()
    return self.option_settings.get_file()
def clear_options(self):
    """Remove all option settings, if any are loaded."""
    if self.option_settings:
        self.option_settings.clear()
def save_default_option_history(self):
    """
    Return a VersionFile for the option file, pointed at the backup folder.

    Consistency: this body was a byte-for-byte copy of get_option_history;
    delegate so the version folder name lives in one place.

    Returns:
        util_file.VersionFile: Version handle for the option file.
    """
    return self.get_option_history()
def load_default_option_history(self):
    """
    Return a VersionFile for the option file, pointed at the backup folder.

    Consistency: this body was a byte-for-byte copy of get_option_history;
    delegate so the version folder name lives in one place.

    Returns:
        util_file.VersionFile: Version handle for the option file.
    """
    return self.get_option_history()
def get_option_history(self):
    """
    Return a VersionFile for the option file, using the backup versions folder.

    Returns:
        util_file.VersionFile: Version handle for the option file.
    """
    option_file = self.get_option_file()
    version_file = util_file.VersionFile(option_file)
    version_file.set_version_folder_name('.backup/.option_versions')
    return version_file
# --- manifest
def get_manifest(self, manifest_file=None):
    """
    Returns:
        tuple: (list, list) Two lists, scripts and states.
        The scripts list contains the name of scripts in the manifest.
        States contains the enabled/disabled state of the script.
    """
    if not manifest_file:
        manifest_file = self.get_manifest_file()
    if not util_file.exists(manifest_file):
        return None, None
    lines = util_file.get_file_lines(manifest_file)
    if not lines:
        return None, None
    scripts = []
    states = []
    for line in lines:
        if not line:
            continue
        states.append(False)
        split_line = line.split()
        if len(split_line):
            script_name = ' '.join(split_line[:-1])
            scripts.append(script_name)
        if len(split_line) >= 2:
            token = split_line[-1]
            # TODO: remove eventually - this is because of pyside6 conversion
            # bug. leave in code for a while just in case.
            # Bug fix: these branches previously assigned a state that was
            # then unconditionally overwritten by eval(), which raised
            # NameError on the CheckState tokens.
            if token == 'CheckState.Checked':
                state = True
            elif token == 'CheckState.UnCheck':
                state = False
            else:
                state = eval(token)
            states[-1] = state
    return scripts, states
def get_manifest_dict(self, manifest_file=None):
    """
    Returns:
        dict: name of code : state
    """
    if not manifest_file:
        manifest_file = self.get_manifest_file()
    manifest_dict = {}
    if not util_file.is_file(manifest_file):
        return manifest_dict
    lines = util_file.get_file_lines(manifest_file)
    if not lines:
        return manifest_dict
    for line in lines:
        script_name = None
        if not line:
            continue
        split_line = line.split()
        if len(split_line):
            script_name = ' '.join(split_line[:-1])
            manifest_dict[script_name] = False
        if len(split_line) >= 2 and script_name:
            # NOTE(review): eval of manifest file content; the file is a
            # trusted local artifact, but ast.literal_eval would be safer.
            state = eval(split_line[-1])
            manifest_dict[script_name] = state
    return manifest_dict
def get_manifest_folder(self):
    """
    Returns:
        str: The path to the manifest folder.
    """
    if not self.directory:
        return
    code_path = self.get_code_path()
    path = util_file.join_path(code_path, 'manifest')
    if not util_file.exists(path):
        # Create on demand; best effort, warn but still return the path.
        try:
            self.create_code('manifest', 'script.manifest')
        except:
            util.warning('Could not create manifest in directory: %s' % code_path)
    return path
def get_manifest_file(self):
    """
    Returns:
        str: The path to the manifest file.
    """
    if not self.directory:
        return
    manifest_path = self.get_manifest_folder()
    filename = util_file.join_path(manifest_path, self.process_data_filename)
    if not util_file.exists(filename):
        # Create the manifest code entry on demand.
        self.create_code('manifest', 'script.manifest')
    return filename
def get_manifest_scripts(self, basename=True, fast_with_less_checks=False):
    """
    Args:
        basename (bool): Whether to return the full path or just the name of the file.
        fast_with_less_checks (bool): Passed through to get_code_files to skip
            the slower data-folder lookup.

    Returns:
        list: The code files named in the manifest.
    """
    manifest_file = self.get_manifest_file()
    if not manifest_file:
        return
    if not util_file.is_file(manifest_file):
        return
    files = self.get_code_files(False, fast_with_less_checking=fast_with_less_checks)
    scripts, states = self.get_manifest()
    if basename:
        return scripts
    if not basename:
        found = []
        for script in scripts:
            if script.count('/') > 0:
                # Expand nested names into folder/file form for path matching.
                # NOTE(review): this reuses (shadows) the basename parameter.
                dirname = util_file.get_dirname(script)
                basename = util_file.get_basename(script)
                sub_basename = util_file.get_basename_no_extension(basename)
                script = util_file.join_path(dirname, sub_basename)
                script = util_file.join_path(script, basename)
            for filename in files:
                if not filename:
                    continue
                if filename.endswith(script) and filename not in found:
                    found.append(filename)
                    break
        return found
def is_in_manifest(self, entry):
    """Return True when entry is listed as a script in the manifest file."""
    filename = self.get_manifest_file()
    lines = util_file.get_file_lines(filename)
    return any(line.split(' ')[0] == entry for line in lines)
def get_manifest_history(self):
    """Return a VersionFile wrapping the manifest file."""
    manifest_file = self.get_manifest_file()
    version_file = util_file.VersionFile(manifest_file)
    return version_file
def set_manifest(self, scripts, states=None, append=False):
    """
    This will tell the manifest what scripts to list. Scripts is a list of python files that need to correspond with code data.

    Args:
        scripts (list): List of scripts to add to the manifest.
        states (list): List of states corresponding to the scripts list.
        append (bool): Whether to add the scripts to the end of the manifest or replace it.
    """
    if states is None:
        states = []
    manifest_file = self.get_manifest_file()
    state_count = len(states)
    lines = []
    for inc, script in enumerate(scripts):
        # The manifest never lists itself.
        if script == 'manifest.py':
            continue
        # Scripts without a supplied state default to disabled.
        state = states[inc] if inc < state_count else False
        lines.append('%s %s' % (script, state))
    util_file.write_lines(manifest_file, lines, append=append)
def has_script(self, script_name):
    """Return True when script_name (with .py extension) is in the manifest."""
    if not script_name.endswith('.py'):
        script_name += '.py'
    scripts, _states = self.get_manifest()
    return script_name in scripts
def get_script_parent(self, script_name):
    """
    Return the script listed above script_name at the same folder depth.

    NOTE(review): despite the name, this finds the previous script at the
    SAME nesting depth, not one level up - confirm the intended semantics.

    Args:
        script_name (str): Manifest script name (.py appended when missing).

    Returns:
        str: The found script name, or None when there is none.
    """
    if not script_name.endswith('.py'):
        script_name = script_name + '.py'
    scripts, states = self.get_manifest()
    for inc in range(0, len(scripts)):
        if script_name == scripts[inc]:
            test_inc = inc - 1
            if test_inc < 0:
                break
            # Walk upward until a script at the same nesting depth is found.
            while scripts[test_inc].count('/') != scripts[inc].count('/'):
                test_inc -= 1
                if test_inc < 0:
                    break
            if test_inc >= 0:
                return scripts[test_inc]
def get_previous_script(self, script_name):
    """
    Return (script, state) of the manifest entry directly before script_name.

    Returns:
        tuple: (previous script, previous state), or (None, None) when there
        is no previous entry or the manifest is missing.
    """
    if not script_name.endswith('.py'):
        script_name = script_name + '.py'
    scripts, states = self.get_manifest()
    if scripts is None and states is None:
        return None, None
    prev_script = None
    prev_state = None
    for script, state in zip(scripts, states):
        if prev_script and script == script_name:
            return prev_script, prev_state
        prev_script = script
        prev_state = state
    return None, None
def insert_manifest_below(self, script_name, previous_script_name, state=False):
    """
    Insert script_name into the manifest directly after previous_script_name.

    Args:
        script_name (str): Script to insert (.py appended when missing).
        previous_script_name (str): Existing script to insert below.
        state (bool): Enabled state for the inserted script.
    """
    if not script_name.endswith('.py'):
        script_name = script_name + '.py'
    if not previous_script_name.endswith('.py'):
        previous_script_name = previous_script_name + '.py'
    scripts, states = self.get_manifest()
    script_count = 0
    if scripts:
        script_count = len(scripts)
    code_folders = self.get_code_folders()
    if not script_count and not code_folders:
        return
    # NOTE(review): if the manifest is missing (scripts is None) but code
    # folders exist, the loop below raises TypeError - confirm intended.
    for inc in range(0, len(scripts)):
        script = scripts[inc]
        if script == previous_script_name:
            scripts.insert(inc + 1, script_name)
            states.insert(inc + 1, state)
            break
    self.set_manifest(scripts, states)
def get_script_state(self, script_name):
    """Return the manifest enabled state for script_name, or None if absent."""
    if not script_name.endswith('.py'):
        script_name += '.py'
    scripts, states = self.get_manifest()
    if scripts is None and states is None:
        return False
    for candidate, state in zip(scripts, states):
        if candidate == script_name:
            return state
    return None
def set_script_state(self, script_name, bool_value):
    """Set the manifest enabled state for script_name and write the manifest."""
    if not script_name.endswith('.py'):
        script_name += '.py'
    scripts, states = self.get_manifest()
    if not scripts:
        util.warning('Could not update state on %s, because it is not in the manifest' % script_name)
        return
    for inc, script in enumerate(scripts):
        if script == script_name:
            states[inc] = bool_value
    self.set_manifest(scripts, states)
def sync_manifest(self):
    """
    Sync the manifest with what's on disk.

    Keeps manifest entries whose code files still exist (dropping duplicates
    and stale entries), then appends any on-disk code folders that were not
    in the manifest, disabled.
    """
    if not self.directory:
        return
    scripts, states = self.get_manifest()
    script_count = 0
    if scripts:
        script_count = len(scripts)
    synced_scripts = []
    synced_states = []
    code_folders = self.get_code_folders()
    if not script_count and not code_folders:
        return
    for inc in range(0, script_count):
        script_name = util_file.remove_extension(scripts[inc])
        filepath = self._get_code_file(script_name)
        # Drop manifest entries whose code file no longer exists.
        if not util_file.exists(filepath):
            continue
        # Drop duplicate entries.
        if scripts[inc] in synced_scripts:
            continue
        synced_scripts.append(scripts[inc])
        synced_states.append(states[inc])
        remove_inc = None
        # NOTE(review): this inner loop reuses (shadows) the outer `inc`.
        for inc in range(0, len(code_folders)):
            if code_folders[inc] == script_name:
                remove_inc = inc
                break
            # NOTE(review): this compares the whole code_folders LIST against
            # a list of strings, so it is always False and the block below is
            # dead code - probably meant code_folders[inc]. Confirm intent.
            if code_folders in synced_scripts:
                if not code_folders[inc].count('/'):
                    continue
                common_path = util_file.get_common_path(code_folders[inc], script_name)
                if common_path:
                    common_path_name = common_path + '.py'
                    if common_path_name in synced_scripts:
                        code_script = code_folders[inc] + '.py'
                        synced_scripts.append(code_script)
                        synced_states.append(False)
                        remove_inc = inc
                        break
        if not remove_inc is None:
            code_folders.pop(remove_inc)
    # Anything left on disk but not in the manifest is appended, disabled.
    for code_folder in code_folders:
        code_script = code_folder + '.py'
        synced_scripts.append(code_script)
        synced_states.append(False)
    self.set_manifest(synced_scripts, synced_states)
# --- creation
def add_part(self, name):
    """
    Args:
        name (str): Name for a new process.

    Returns:
        Process: Instance of the added part.
    """
    part_process = Process(name)
    # Sub parts live under this process's folder; a nameless process uses
    # its directory directly.
    if self.process_name:
        path = util_file.join_path(self.directory, self.process_name)
    else:
        path = self.directory
    part_process.set_directory(path)
    part_process.create()
    return part_process
def create(self):
    """
    Create the process.

    Returns:
        str: Path to the process.
    """
    return self._create_folder()
def delete(self):
    """
    Delete the process.

    Returns:
        None
    """
    if self.process_name:
        util_file.delete_dir(self.process_name, self.directory)
    else:
        # Nameless process: the directory itself is the process folder.
        basename = util_file.get_basename(self.directory)
        dirname = util_file.get_dirname(self.directory)
        util_file.delete_dir(basename, dirname)
def rename(self, new_name):
    """
    Rename the process.

    Args:
        new_name (str): New name for the process.

    Returns:
        bool: Whether the process was renamed properly.
    """
    split_name = new_name.split('/')
    # Only the last path component is used for the on-disk rename.
    if util_file.rename(self.get_path(), split_name[-1]):
        self.load(new_name)
        return True
    return False
def find_code_file(self, script):
    """
    Args:
        script (str): Name (or full path) of a code in the process

    Returns:
        str: Resolved path to the code file, or None when nothing matches.
    """
    # A path that already points at a file is returned untouched.
    if util_file.is_file(script):
        return script
    resolved = self._get_code_file(util_file.remove_extension(script))
    if util_file.is_file(resolved):
        return resolved
    # Last resort: search all code files for a matching basename.
    return self.get_first_matching_code(resolved)
# --- run
@decorator_process_run_script
def run_script(self, script, hard_error=True, settings=None, return_status=False):
    """
    Run a script in the process.

    Args:
        script(str): Name of a code in the process.
        hard_error (bool): Whether to error hard when errors encountered, or to just pass an error string.
        settings: NOTE(review): unused in this body - presumably consumed by
            the decorator or kept for signature compatibility; confirm.
        return_status (bool): Return the status string instead of the script's result.

    Returns:
        str: The status from running the script. This includes error messages.
    """
    watch = util.StopWatch()
    watch.start(feedback=False)
    self._setup_options()
    sys.path.append(self.get_code_path())
    orig_script = script
    status = None
    result = None
    init_passed = False
    module = None
    name = None
    try:
        script = self.find_code_file(script)
        if not script:
            # NOTE(review): duplicated condition - the inner `if not script`
            # is always True here.
            if not script:
                watch.end()
                util.show('Could not find script: %s' % orig_script)
                return
        name = util_file.get_basename(script)
        # Make configured external code paths importable.
        for external_code_path in self.external_code_paths:
            if util_file.is_dir(external_code_path):
                if external_code_path not in sys.path:
                    sys.path.append(external_code_path)
        util.show('\n________________________________________________')
        message = 'START\t%s\n' % name
        util.show(message)
        util.global_tabs = 2
        module, init_passed, status = self._source_script(script)
    except Exception:
        util.warning('%s did not source' % script)
        status = traceback.format_exc()
        init_passed = False
        if hard_error:
            try:
                del module
            except:
                watch.end()
                util.warning('Could not delete module')
            util.error('%s\n' % status)
            raise Exception('Script did not source. %s' % script)
    if init_passed:
        try:
            if hasattr(module, 'main'):
                if not hasattr(module, 'process') or module.process is None:
                    # for legacy, if process was set to None override it with this process
                    module.process = self
                result = module.main()
                # Record the last returned value for later scripts to read.
                put = None
                if self._data_override:
                    put = self._data_override._put
                else:
                    put = self._put
                put.last_return = result
                self._runtime_globals['last_return'] = result
            status = 'Success'
        except Exception:
            status = traceback.format_exc()
            if hard_error:
                watch.end()
                util.error('%s\n' % status)
                raise Exception('Script errored on main. %s' % script)
    self._pass_module_globals(module)
    del module
    if not status == 'Success':
        util.show('%s\n' % status)
    minutes, seconds = watch.end()
    util.global_tabs = 1
    message = ''
    if minutes and seconds:
        message = 'END\t%s\t %s minutes and %s seconds ' % (name, minutes, seconds)
    else:
        message = 'END\t%s\t %s seconds' % (name, seconds)
    util.show(message)
    util.show('------------------------------------------------\n')
    if return_status:
        return status
    else:
        return result
def run_option_script(self, name, group=None, hard_error=True):
    """Run the script stored in the named option as a code snippet."""
    script = self.get_option(name, group)
    self.run_code_snippet(script, hard_error)
@decorator_undo_chunk
def run_code_snippet(self, code_snippet_string, hard_error=True):
    """
    Execute a string of Python code with the process builtins in scope.

    Args:
        code_snippet_string (str): Source code to exec.
        hard_error (bool): Re-raise on failure instead of only returning
            the error text.

    Returns:
        str: 'Success' or the traceback text.
    """
    script = code_snippet_string
    status = None
    try:
        # Make configured external code paths importable.
        for external_code_path in self.external_code_paths:
            if util_file.is_dir(external_code_path):
                if external_code_path not in sys.path:
                    sys.path.append(external_code_path)
        pass_process = self
        if self._data_override:
            pass_process = self._data_override
        builtins = get_process_builtins(pass_process)
        # SECURITY NOTE: exec runs arbitrary code; snippets are assumed to be
        # trusted, process-authored scripts.
        exec(script, globals(), builtins)
        status = 'Success'
    except Exception:
        util.warning('script error!\n %s' % script)
        status = traceback.format_exc()
        if hard_error:
            util.error('%s\n' % status)
            raise
    if not status == 'Success':
        util.show('%s\n' % status)
    return status
    def run_script_group(self, script, clear_selection=True, hard_error=True):
        """
        Run the given manifest script followed by all of its enabled
        children/grandchildren.

        Args:
            script (str): Manifest name of the parent script to run.
            clear_selection (bool): Whether to clear the Maya selection before each script runs.
            hard_error (bool): Whether to raise on failure instead of just recording it.

        Returns:
            list: [[script_name, status], ...] for everything that ran.
        """
        status_list = []
        scripts_that_error = []
        skip_children = False
        if in_maya:
            if clear_selection:
                cmds.select(cl=True)
        status = None
        try:
            # Parent always runs with hard_error so a failure stops the group here.
            status = self.run_script(script, hard_error=True, return_status=True)
            if self._skip_children:
                skip_children = True
                self._skip_children = None
        except:
            if hard_error:
                util.error('%s\n' % status)
                raise
            status = 'fail'
        if not status == 'Success':
            scripts_that_error.append(script)
            if hard_error:
                message = 'Script: %s in run_script_group.' % script
                temp_log = '\nError: %s' % message
                util.record_temp_log(temp_log)
                raise Exception(message)
        children = self.get_code_children(script)
        if skip_children:
            # The parent called skip_children() during its run; do not descend.
            children = []
            skip_children = False
        child_count = len(children)
        manifest_dict = self.get_manifest_dict()
        progress_bar = None
        if in_maya:
            progress_bar = core.ProgressBar('Process Group', child_count)
            progress_bar.status('Processing Group: getting ready...')
        skip_children = False
        for child in children:
            if progress_bar:
                progress_bar.set_count(child_count)
                progress_bar.status('Processing: %s' % script)
                if progress_bar.break_signaled():
                    message = 'The script group was cancelled before finishing.'
                    temp_log = '\nError: %s' % message
                    util.record_temp_log(temp_log)
                    raise Exception(message)
            # Only run children that are enabled in the manifest.
            if manifest_dict[child]:
                if in_maya:
                    if clear_selection:
                        cmds.select(cl=True)
                # NOTE: rebinds the name 'children' to this child's children; the
                # for-loop keeps iterating the original list object it captured.
                children = self.get_code_children(child)
                if skip_children:
                    children = []
                    skip_children = False
                if children:
                    # Child is itself a group: recurse.
                    try:
                        status = self.run_script_group(child, hard_error=True)
                        if self._skip_children:
                            skip_children = True
                            self._skip_children = None
                    except:
                        if hard_error:
                            util.error('%s\n' % status)
                            if progress_bar:
                                progress_bar.end()
                            raise
                        status = 'fail'
                if not children:
                    # Leaf child: run it directly.
                    try:
                        status = self.run_script(child, hard_error=True, return_status=True)
                        if self._skip_children:
                            skip_children = True
                            self._skip_children = None
                    except:
                        if hard_error:
                            util.error('%s\n' % status)
                            if progress_bar:
                                progress_bar.end()
                            raise
                        status = 'fail'
                if status == 'fail':
                    scripts_that_error.append(child)
                    if hard_error:
                        if progress_bar:
                            progress_bar.end()
                        message = 'Script: %s in run_script_group.' % script
                        temp_log = '\nError: %s' % message
                        util.record_temp_log(temp_log)
                        raise Exception(message)
            if progress_bar:
                progress_bar.inc()
            # Group recursion returns a list; flatten it into this group's results.
            if not isinstance(status, list):
                status_list.append([child, status])
            else:
                status_list += status
        if progress_bar:
            progress_bar.end()
        return status_list
    def skip_children(self):
        """
        To be run during process to skip running of children scripts.

        Returns:
            list or None: The child script names that will be skipped, or None
            when the currently running script could not be located.
        """
        self._skip_children = None
        script_name = self.current_script
        script_path = self.find_code_file(script_name)
        if not script_path:
            return
        code_path = self.get_code_path()
        # Convert the absolute script path back into a manifest-relative code name.
        code_name = util_file.remove_common_path_simple(code_path, script_path)
        code_name = util_file.get_dirname(code_name)
        childs = self.get_code_children(code_name)
        if childs:
            self._skip_children = childs
        return childs
    def run(self, start_new=False):
        """
        Run all the scripts in the manifest, respecting their on/off state.

        Args:
            start_new (bool): In Maya, start a new scene before running.

        Returns:
            list: [[script_name, status], ...] for every manifest entry, or
            None when the manifest has no scripts.
        """
        self.option_settings = None
        self._setup_options()
        # Remember the previous process so nested runs can restore it at the end.
        prev_process = os.environ.get('VETALA_CURRENT_PROCESS')
        util.set_env('VETALA_CURRENT_PROCESS', self.get_path())
        util.show('\n-----------------------------------------'
                  '-------------------------------------------------------------')
        watch = util.StopWatch()
        watch.start(feedback=False)
        name = self.get_name()
        message = '\n\n\aProcess: %s\t\a\n' % name
        manage_node_editor_inst = None
        if in_maya:
            # Pause node editor updates during the build for speed.
            manage_node_editor_inst = core.ManageNodeEditors()
            if start_new:
                core.start_new_scene()
            manage_node_editor_inst.turn_off_add_new_nodes()
            if core.is_batch():
                message = '\n\nProcess: %s\n\n' % name
        util.show(message)
        util.show('\n\nProcess path: %s' % self.get_path())
        util.show('Option path: %s' % self.get_option_file())
        util.show('Settings path: %s' % self.get_settings_file())
        util.show('Runtime values: %s\n\n' % self.runtime_values)
        scripts, states = self.get_manifest()
        if not scripts:
            util.show('No scripts!')
            return
        scripts_that_error = []
        # Maps script name (no extension) to its effective on/off state so
        # children of a skipped parent are skipped too.
        state_dict = {}
        progress_bar = None
        if in_maya:
            progress_bar = core.ProgressBar('Process', len(scripts))
            progress_bar.status('Processing: getting ready...')
        status_list = []
        skip_children = None
        for inc in range(0, len(scripts)):
            state = states[inc]
            script = scripts[inc]
            status = 'Skipped'
            check_script = util_file.remove_extension(script)
            if skip_children:
                # A previous script requested its children be skipped.
                if script.startswith(skip_children):
                    state = False
            state_dict[check_script] = state
            if progress_bar:
                progress_bar.status('Processing: %s' % script)
                if progress_bar.break_signaled():
                    break
            if state:
                # Skip when any ancestor in the manifest was turned off.
                parent_state = True
                for key in state_dict:
                    if script.find(key) > -1:
                        parent_state = state_dict[key]
                        if parent_state == False:
                            break
                if not parent_state:
                    util.show('\tSkipping: %s\n\n' % script)
                    if progress_bar:
                        progress_bar.inc()
                    continue
                self._update_options = False
                if in_maya:
                    cmds.select(cl=True)
                try:
                    status = self.run_script(script, hard_error=False, return_status=True)
                    if self._skip_children:
                        skip_children = check_script
                        self._skip_children = None
                except Exception:
                    error = traceback.format_exc()
                    util.error(error)
                    status = 'fail'
                self._update_options = True
                if not status == 'Success':
                    scripts_that_error.append(script)
            if not states[inc]:
                util.show('\n------------------------------------------------')
                util.show('Skipping: %s\n\n' % script)
            if progress_bar:
                progress_bar.inc()
            status_list.append([script, status])
        minutes, seconds = watch.stop()
        if progress_bar:
            progress_bar.end()
        if scripts_that_error:
            # NOTE(review): this message text looks truncated/garbled -- confirm
            # the intended wording before shipping it to users.
            util.show('\n\n\nThe following scripts errored during buil/project/')
            for script in scripts_that_error:
                util.show('\n' + script)
            util.show('\n\n')
        for status_entry in status_list:
            util.show('%s : %s' % (status_entry[1], status_entry[0]))
        util.show('\n\n')
        if minutes is None:
            util.show('\n\n\nProcess: %s\nPath: %s\nbuilt in %s seconds.\n\n' % (self.get_basename(), self.get_path(), seconds))
        if minutes is not None:
            util.show('\n\n\nProcess: %s\nPath: %s\nbuilt in %s minutes, %s seconds.\n\n' % (self.get_basename(), self.get_path(), minutes, seconds))
        util.set_env('VETALA_CURRENT_PROCESS', prev_process)
        if manage_node_editor_inst:
            manage_node_editor_inst.restore_add_new_nodes()
        return status_list
def set_runtime_value(self, name, value):
"""
This stores data to run between scripts.
Args:
name (str): The name of the script.
value : Can be many different types including str, list, tuple, float, int, etc.
Returns:
None
"""
util.show('> Runtime Variable: %s, value: %s.' % (name, value))
self.runtime_values[name] = value
def get_runtime_value(self, name):
"""
Get the value stored with set_runtime_value.
Args:
name (str): The name given to the runtime value in set_runtime_value.
Returns:
The value stored in set_runtime_value.
"""
if name in self.runtime_values:
value = self.runtime_values[name]
util.show('< Runtime Variable: %s, value: %s' % (name, value))
return value
def get_runtime_value_keys(self):
"""
Get the runtime value dictionary keys.
Every time a value is set with set_runtime_value, and dictionary entry is made.
Returns:
list: keys in runtime value dictionary.
"""
return list(self.runtime_values.keys())
    def set_data_override(self, process_inst):
        # Route runtime data (put store / runtime values) through another
        # process instance instead of this one.
        self._data_override = process_inst
    def get_data_override(self):
        # Process instance currently used for runtime data, or None when unset.
        return self._data_override
    def run_batch(self):
        """
        Run this process in a separate Maya batch session, using the
        shared process batch file.
        """
        process_path = self.get_path()
        util.set_env('VETALA_CURRENT_PROCESS', process_path)
        batch_path = util_file.get_process_batch_file()
        util_file.maya_batch_python_file(batch_path)
def run_deadline(self):
path = self.get_path()
name = self.get_basename()
stamp = util_file.get_date_and_time()
batch_name = 'Vetala Batch: %s (%s)' % (name, util_file.get_date_and_time(separators=False))
sub_processes = self._get_enabled_children()
sub_process_dict = {}
for sub_process in sub_processes:
sub_path = util_file.join_path(path, sub_process)
dependents = []
for key in sub_process_dict:
key_id = sub_process_dict[key]
if key.startswith(sub_process):
dependents.append(key_id)
sub_job_id = run_deadline(sub_path, sub_process, parent_jobs=dependents, batch_name=batch_name)
sub_process_dict[sub_process] = sub_job_id
all_dependents = sub_process_dict.values()
run_deadline(path, name, parent_jobs=all_dependents, batch_name=batch_name)
    def reset_runtime(self):
        """
        Clear the stored runtime values and replace the put data store.
        """
        if self._data_override:
            # NOTE(review): this resets self._runtime_values (leading underscore)
            # while every other accessor uses self.runtime_values -- possibly it
            # was meant to reset the override's runtime values; confirm intent
            # before changing.
            self._runtime_values = {}
            self._data_override._put = Put()
        else:
            self.runtime_values = {}
            self._put = Put()
def run_ramen(self, graph_name='graph1'):
ramen_path = self.get_ramen_path()
full_path = '%s/graphs/%s/ramen.json' % (ramen_path, graph_name)
util.show('Running Ramen: %s' % full_path)
from ..ramen import eval as ramen_eval
ramen_eval.run_json(full_path)
def set_unreal_skeletal_mesh(self, filepath):
util.set_env('VETALA_CURRENT_PROCESS_SKELETAL_MESH', value)
self._unreal_skeletal_mesh = filepath
    def get_unreal_skeletal_mesh(self):
        # Path last registered with set_unreal_skeletal_mesh.
        return self._unreal_skeletal_mesh
def set_unreal_control_rig(self, control_rig):
if not in_unreal:
return
if control_rig == None:
unreal_lib.graph.set_current_control_rig(None)
return
if isinstance(control_rig, unreal.ControlRigBlueprint):
control_rig_inst = control_rig
else:
control_rig_inst = unreal_lib.core.get_control_rig_object(control_rig)
unreal_lib.graph.current_control_rig = control_rig_inst
    def get_unreal_control_rig(self):
        """Return the control rig blueprint currently targeted by the Unreal graph utilities."""
        from .. import unreal_lib
        return unreal_lib.graph.current_control_rig
class Put(dict):
    """
    Attribute-style data store that keeps data between code runs.

    Attribute reads and writes are echoed once to the Vetala log; repeated
    access of the same name is not echoed again (tracked in _cache_feedback).
    """

    def __init__(self):
        # Write straight into __dict__ so __setattr__ feedback is not triggered.
        self.__dict__['_cache_feedback'] = {}

    def __getattribute__(self, attr):
        try:
            value = object.__getattribute__(self, attr)
        except:
            util.warning('Put has no attribute: %s' % attr)
            return
        if attr == '__dict__':
            return value
        # Echo each attribute only the first time it is read.
        if attr not in self.__dict__['_cache_feedback']:
            util.show('< put.%s %s' % (attr, value))
            self.__dict__['_cache_feedback'][attr] = None
        return value

    def __setitem__(self, key, value):
        # BUGFIX: was exec('self.%s = value' % key), which broke for keys that
        # are not identifiers and allowed code injection; setattr routes through
        # __setattr__ (feedback + cache) exactly as before.
        setattr(self, key, value)
        self.__dict__[key] = value

    def __setattr__(self, key, value):
        if key != '_cache_feedback' and key != 'last_return':
            util.show('> put.%s=%s' % (key, value))
        super(Put, self).__setattr__(key, value)
        self.__dict__['_cache_feedback'][key] = None

    def set(self, name, value):
        # BUGFIX: was exec('self.%s = %s' % (name, value)), which evaluated the
        # value as Python source (NameError for plain strings, injection risk).
        # setattr stores the value object itself.
        setattr(self, name, value)

    def get_attribute_names(self):
        # BUGFIX: 'self.attribute_names' never existed (the failed lookup warned
        # and returned None, then crashed on .keys()). The stored attributes
        # live in the instance __dict__ alongside the feedback cache.
        return [key for key in self.__dict__ if key != '_cache_feedback']
def get_default_directory():
    """
    Get a default directory to begin in.
    The directory is different if running from inside Maya.
    Returns:
        str: Path to the default directory.
    """
    # Delegates to util_file so UI and batch code share one default location.
    return util_file.get_default_directory()
def copy(source_file_or_folder, target_file_or_folder, description=''):
    """
    Copy a file or a folder to the target path, replacing any existing target,
    then record a version entry for the copy.

    Args:
        source_file_or_folder (str): Path to copy from.
        target_file_or_folder (str): Path to copy to (deleted first if present).
        description (str): Label used in the completion message.
    """
    source_is_file = util_file.is_file(source_file_or_folder)
    copied_path = -1
    if source_is_file:
        if util_file.exists(target_file_or_folder):
            util_file.delete_file(target_file_or_folder)
        copied_path = util_file.copy_file(source_file_or_folder, target_file_or_folder)
    else:
        if not util_file.exists(source_file_or_folder):
            util.warning('Nothing to copy: %s Data was probably created but not saved to yet. '
                         % util_file.get_dirname(source_file_or_folder))
            return
        if util_file.exists(target_file_or_folder):
            util_file.delete_dir(target_file_or_folder)
        copied_path = util_file.copy_dir(source_file_or_folder, target_file_or_folder)
    if not copied_path:
        util.warning('Error copying %s to %s' % (source_file_or_folder, target_file_or_folder))
        return
    util.show('Finished copying %s from %s to %s' % (description, source_file_or_folder, target_file_or_folder))
    version = util_file.VersionFile(copied_path)
    version.save('Copied from %s' % source_file_or_folder)
def copy_process(source_process, target_directory=None):
    """
    Copy a process (data, code, settings, ramen graphs and sub processes) into
    a new process.

    If no target_directory is specified, the copy is created next to the source
    process. If a process of the same name already exists at the target, the
    new name is incremented; rename it afterwards for a specific name.

    Args:
        source_process (instance): The instance of a process.
        target_directory (str): Directory to create the copied process in.

    Returns:
        Process: The newly created process instance, or None on failure/cancel.
    """
    if target_directory:
        parent_directory = util_file.get_dirname(target_directory)
        if parent_directory:
            # Guard against pasting a process inside itself (infinite recursion).
            if parent_directory == source_process.get_path():
                util.error('Cannot paste parent under child. Causes recursion error')
                return
    sub_folders = source_process.get_sub_processes()
    source_name = source_process.get_name()
    source_name = source_name.split('/')[-1]
    if not target_directory:
        target_directory = util_file.get_dirname(source_process.get_path())
    if not util_file.get_permission(target_directory):
        util.warning('Could not get permission in directory: %s' % target_directory)
        return
    new_name = get_unused_process_name(target_directory, source_name)
    new_process = Process()
    new_process.set_directory(target_directory)
    new_process.load(new_name)
    new_process.create()
    data_folders = source_process.get_data_folders()
    code_folders = source_process.get_code_names(include_scripts=True)
    settings = source_process.get_setting_names()
    ramens = source_process.get_ramen_graphs()
    if util.in_maya:
        progress = core.ProgressBar()
    data_count = len(data_folders)
    if data_count:
        if util.in_maya:
            progress.set_count(len(data_folders))
        for data_folder in data_folders:
            if util.in_maya:
                progress.status('Copying Data: %s' % data_folder)
                progress.inc()
            copy_process_data(source_process, new_process, data_folder)
            if util.in_maya:
                if progress.break_signaled():
                    progress.end()
                    return
    # The manifest is copied last so it reflects every copied code folder.
    manifest_found = False
    if code_folders:
        if 'manifest' in code_folders:
            code_folders.remove('manifest')
            manifest_found = True
    code_count = len(code_folders)
    if code_count:
        if util.in_maya:
            progress.set_count(len(code_folders))
            progress.inc(0)
        for code_folder in code_folders:
            if util.in_maya:
                progress.status('Copying Code: %s' % code_folder)
                progress.inc()
            copy_process_code(source_process, new_process, code_folder)
            if util.in_maya:
                if progress.break_signaled():
                    progress.end()
                    return
    if util.in_maya:
        progress.end()
    # Recurse into sub processes that were not already created.
    for sub_folder in sub_folders:
        sub_process = new_process.get_sub_process(sub_folder)
        source_sub_process = source_process.get_sub_process(sub_folder)
        if not sub_process.is_process():
            copy_process(source_sub_process, new_process.get_path())
    if manifest_found:
        copy_process_code(source_process, new_process, 'manifest')
    for setting in settings:
        copy_process_setting(source_process, new_process, setting)
    for ramen in ramens:
        copy_process_ramen(source_process, new_process, ramen)
    return new_process
def copy_process_into(source_process, target_process, merge_sub_folders=False):
    """
    Merge the contents of source_process (data, code, settings) into an
    existing target_process.

    Args:
        source_process (instance): Process to copy from.
        target_process (instance): Existing process to copy into.
        merge_sub_folders (bool): Whether to also merge sub processes recursively.
    """
    if source_process.process_name == target_process.process_name and source_process.directory == target_process.directory:
        util.warning('Source and target process are the same. Skipping merge.')
        return
    if not target_process:
        return
    if not target_process.is_process():
        return
    sub_folders = source_process.get_sub_processes()
    source_name = source_process.get_name()
    source_name = source_name.split('/')[-1]
    data_folders = source_process.get_data_folders()
    code_folders = source_process.get_code_folders()
    settings = source_process.get_setting_names()
    for data_folder in data_folders:
        copy_process_data(source_process, target_process, data_folder)
    # The manifest is copied last so it reflects every copied code folder.
    manifest_found = False
    if 'manifest' in code_folders:
        code_folders.remove('manifest')
        manifest_found = True
    for code_folder in code_folders:
        copy_process_code(source_process, target_process, code_folder)
    if sub_folders and merge_sub_folders:
        for sub_folder in sub_folders:
            sub_target = target_process.get_sub_process(sub_folder)
            if sub_target:
                if not sub_target.is_process():
                    sub_target.create()
                sub_process = source_process.get_sub_process(sub_folder)
                copy_process_into(sub_process, sub_target)
    if manifest_found:
        copy_process_code(source_process, target_process, 'manifest')
    for setting in settings:
        copy_process_setting(source_process, target_process, setting)
def copy_process_data(source_process, target_process, data_name, replace=False, sub_folder=None):
    """
    Copy one data folder from source_process to target_process.

    source_process and target_process need to be instances of the Process class,
    set to the directory and process name desired to work with.

    Args:
        source_process (instance): The process to copy from.
        target_process (instance): The process to copy to.
        data_name (str): The name of the data to copy.
        replace (bool): Whether to replace data of a different type in the
            target process instead of just versioning up.
        sub_folder (str): The name of the data sub folder to copy.
    """
    data_type = source_process.get_data_type(data_name)
    is_folder = False
    if not data_type:
        # No data type means this is a plain grouping folder of other data folders.
        is_folder = True
    data_folder_path = None
    path = source_process.get_data_path()
    if is_folder:
        util_file.create_dir(data_name, path)
        source_process.set_data_parent_folder(data_name)
        target_process.set_data_parent_folder(data_name)
        sub_folders = source_process.get_data_folders()
        for sub_data_folder in sub_folders:
            copy_process_data(source_process, target_process, sub_data_folder, replace=False, sub_folder=None)
        source_process.set_data_parent_folder(None)
        target_process.set_data_parent_folder(None)
        return
    if not target_process.is_process():
        util.warning('Could not copy data, %s is not a vetala process.' % target_process)
        return
    if target_process.is_data_folder(data_name, sub_folder):
        data_folder_path = target_process.get_data_folder(data_name, sub_folder)
        if replace:
            other_data_type = target_process.get_data_type(data_name)
            if data_type != other_data_type:
                target_process.delete_data(data_name, sub_folder)
                # BUGFIX: sub_folder was previously passed positionally into the
                # 'replace' parameter slot; pass it by keyword so the sub folder
                # is preserved on the retry.
                copy_process_data(source_process, target_process, data_name, sub_folder=sub_folder)
                return
    if not target_process.is_data_folder(data_name, sub_folder):
        data_folder_path = target_process.create_data(data_name, data_type, sub_folder)
    instance = None
    if not is_folder:
        data_folder = data.DataFolder(data_name, path)
        instance = data_folder.get_folder_data_instance()
    if not instance:
        util.warning('Could not get data folder instances for: %s' % data_name)
        return
    filepath = instance.get_file_direct(sub_folder)
    if not filepath:
        return
    name = util_file.get_basename(filepath)
    destination_directory = util_file.join_path(data_folder_path, name)
    if not util_file.is_dir(data_folder_path):
        util_file.create_dir(data_folder_path)
    if sub_folder:
        sub_path = target_process.create_sub_folder(data_name, sub_folder)
        destination_directory = util_file.join_path(sub_path, name)
    copy(filepath, destination_directory, data_name)
    handle_copy_process_data_extras(filepath, destination_directory, target_process, data_name)
    if not sub_folder:
        # Also copy every sub folder variant of this data.
        sub_folders = source_process.get_data_sub_folder_names(data_name)
        for sub_folder in sub_folders:
            copy_process_data(source_process, target_process, data_name, replace, sub_folder)
def handle_copy_process_data_extras(filepath, destination_directory, target_process, data_name):
    """
    Copy the side-car content that accompanies a data file: the data.json
    settings file and, for folder-based data, the sibling folders next to it.
    """
    parent_path = util_file.get_dirname(filepath)
    parent_dest_path = util_file.get_dirname(destination_directory)
    settings = util_file.join_path(parent_path, 'data.json')
    settings_dest = util_file.join_path(parent_dest_path, 'data.json')
    util_file.copy_file(settings, settings_dest)
    # Drop any cached copy of the destination settings so the new file is re-read.
    if settings_dest in util_file.SettingsFile.__cache_settings__:
        util_file.SettingsFile.__cache_settings__.pop(settings_dest)
    target_process.cache_data_type_read(data_name)
    if util_file.is_dir(destination_directory):
        # Folder-based data: bring over sibling folders (e.g. versions) too.
        folders = util_file.get_folders(parent_path)
        for folder in folders:
            if folder.startswith('.'):
                continue
            other_folder = util_file.join_path(parent_path, folder)
            other_dest_folder = util_file.join_path(parent_dest_path, folder)
            # The copied data folder itself was already handled.
            if other_dest_folder == destination_directory:
                continue
            util_file.copy_dir(other_folder, other_dest_folder)
def copy_process_code(source_process, target_process, code_name, replace=False):
    """
    Copy one code folder from source_process to target_process.

    source_process and target_process need to be instances of the Process class,
    set to the directory and process name desired to work with.

    Args:
        source_process (str): The instance of a process.
        target_process (str): The instance of a process.
        code_name (str): The name of the code to copy.
        replace (bool): Whether to replace code of a different type in the
            target process instead of just versioning up.
    """
    if code_name is None:
        return
    code_folder_path = None
    if target_process.is_code_folder(code_name):
        data_type = source_process.get_code_type(code_name)
        code_folder_path = target_process.get_code_folder(code_name)
        code_filepath = target_process.get_code_file(code_name)
        if not code_filepath:
            # Target folder exists but has no code file yet; create one.
            target_process.create_code(code_name, data_type, inc_name=False, import_data=None)
            code_filepath = target_process.get_code_file(code_name)
        code_file = util_file.get_basename(code_filepath)
        code_folder_path = util_file.join_path(code_folder_path, code_file)
        other_data_type = target_process.get_code_type(code_name)
        if data_type != other_data_type:
            if replace:
                target_process.delete_code(code_name)
                copy_process_code(source_process, target_process, code_name)
                return
    if source_process.is_code_folder(code_name):
        data_type = source_process.get_code_type(code_name)
        code_folder_path = target_process.create_code(code_name, data_type)
        path = source_process.get_code_path()
        data_folder = data.DataFolder(code_name, path)
        instance = data_folder.get_folder_data_instance()
        if not instance:
            return
        filepath = instance.get_file()
        copied_path = None
        destination_directory = None
        if filepath:
            destination_directory = code_folder_path
            path = target_process.get_code_path()
            # NOTE(review): this DataFolder result is discarded and the following
            # set_data_type call targets the SOURCE data_folder -- looks like the
            # result was meant to be assigned to data_folder; confirm intent.
            data.DataFolder(code_name, path)
            data_folder.set_data_type(data_type)
            if util_file.is_file(filepath):
                copied_path = util_file.copy_file(filepath, destination_directory)
            if util_file.is_dir(filepath):
                copied_path = util_file.copy_dir(filepath, destination_directory)
            if copied_path:
                version = util_file.VersionFile(copied_path)
                version.save('Copied from %s' % filepath)
            if not copied_path:
                util.warning('Error copying %s to %s' % (filepath, destination_directory))
                return
            source_path = util_file.join_path(source_process.get_code_path(), code_name)
            target_path = util_file.join_path(target_process.get_code_path(), code_name)
        else:
            # No data file: copy the raw folder/file straight across.
            source_path = util_file.join_path(source_process.get_code_path(), code_name)
            target_path = util_file.join_path(target_process.get_code_path(), code_name)
            if util_file.is_dir(source_path):
                util_file.create_dir(code_name, target_process.get_code_path())
            if util_file.is_file(source_path):
                util_file.copy_file(source_path, target_path)
    util.show('Finished copying code from %s to %s' % (source_path, target_path))
def copy_process_setting(source_process, target_process, setting_name):
    """
    Copy one settings file from source_process into target_process, replacing
    any existing file of the same name.

    Args:
        source_process (instance): Process to copy the setting file from.
        target_process (instance): Process to copy the setting file into.
        setting_name (str): Name of the setting file.
    """
    source_filepath = source_process.get_setting_file(setting_name)
    if not source_filepath:
        return
    destination_filepath = target_process.get_setting_file(setting_name)
    if util_file.is_file(destination_filepath):
        util_file.delete_file(util_file.get_basename(destination_filepath),
                              util_file.get_dirname(destination_filepath))
    util_file.copy_file(source_filepath, target_process.get_path())
    util.show('Finished copying options from %s' % source_process.get_path())
def copy_process_ramen(source_process, target_process, graph_name):
    """
    Copy one ramen graph file from source_process into target_process and
    record a version entry for the copy.

    Args:
        source_process (instance): Process to copy the graph from.
        target_process (instance): Process to copy the graph into.
        graph_name (str): Name of the ramen graph.
    """
    filepath = source_process.get_ramen_file(graph_name)
    if not filepath:
        return
    target_filepath = target_process.create_ramen_file(graph_name)
    copied_path = util_file.copy_file(filepath, target_filepath)
    if not copied_path:
        util.warning('Error copying %s to %s' % (filepath, target_filepath))
        return
    version = util_file.VersionFile(copied_path)
    version.save('Copied from %s' % filepath)
def get_vetala_settings_inst():
    """
    Load the global vetala SettingsFile from the VETALA_SETTINGS environment
    variable.

    Returns:
        SettingsFile or None: None when VETALA_SETTINGS is not set.
    """
    settings_path = os.environ.get('VETALA_SETTINGS')
    if not settings_path:
        return None
    settings_inst = util_file.SettingsFile()
    settings_inst.set_directory(settings_path)
    return settings_inst
def initialize_project_settings(project_directory, settings_inst=None):
    """
    Make sure the vetala settings contain a 'project settings' entry for
    project_directory, creating it when missing.

    Args:
        project_directory (str): Project directory to register.
        settings_inst (SettingsFile): Optional settings instance; loaded from
            the VETALA_SETTINGS environment variable when omitted.

    Returns:
        dict: The 'project settings' dictionary keyed by project directory.
    """
    if not settings_inst:
        settings_inst = get_vetala_settings_inst()
    project_settings_dict = {}
    if not settings_inst.has_setting('project settings'):
        project_settings_dict = {project_directory: {}}
        settings_inst.set('project settings', project_settings_dict)
    if not project_settings_dict:
        # Setting already existed; load it and add this project when needed.
        project_settings_dict = settings_inst.get('project settings')
        if project_directory not in project_settings_dict:
            project_settings_dict[project_directory] = {}
            settings_inst.set('project settings', project_settings_dict)
    return project_settings_dict
def get_project_setting(name, project_directory, settings_inst=None):
    """
    Read one per-project setting value.

    Args:
        name (str): Setting name.
        project_directory (str): Project directory the setting belongs to.
        settings_inst (SettingsFile): Optional pre-loaded settings instance;
            loaded from the environment when omitted.

    Returns:
        The stored value, or None when the project or the setting is unknown.
    """
    if not settings_inst:
        settings_inst = get_vetala_settings_inst()
    if not settings_inst.has_setting('project settings'):
        return None
    project_settings = settings_inst.get('project settings')
    if project_directory not in project_settings:
        return None
    return project_settings[project_directory].get(name)
def set_project_setting(name, value, project_directory, settings_inst=None):
    """
    Write one per-project setting value.

    Args:
        name (str): Setting name.
        value: Value to store.
        project_directory (str): Project directory the setting belongs to.
        settings_inst (SettingsFile): Optional pre-loaded settings instance;
            it is reloaded first so concurrent edits are not lost.
    """
    if settings_inst:
        settings_inst.reload()
    else:
        settings_inst = get_vetala_settings_inst()
    if not settings_inst.has_setting('project settings'):
        return
    project_settings = settings_inst.get('project settings')
    if project_directory not in project_settings:
        return
    project_settings[project_directory][name] = value
    settings_inst.set('project settings', project_settings)
def get_custom_backup_directory(process_directory):
    """
    Resolve the custom backup directory for a process, mirroring the process'
    location relative to the project under the configured backup root.

    Args:
        process_directory (str): Directory of the process to back up.

    Returns:
        str or None: Directory to back up into, or None when it could not be created.
    """
    settings = util_file.get_vetala_settings_inst()
    backup = settings.get('backup_directory')
    backup_directory = None
    if util_file.is_dir(backup):
        project = settings.get('project_directory')
        process_inst = Process()
        process_inst.set_directory(process_directory)
        backup_directory = process_directory
        # Record which project this backup folder mirrors, keyed by a
        # filesystem-safe version of the project path.
        backup_settings = util_file.SettingsFile()
        backup_settings.set_directory(backup)
        project_name = util_file.fix_slashes(project)
        project_name = project_name.replace('/', '_')
        project_name = project_name.replace(':', '_')
        backup_settings.set(project_name, project)
        backup_directory = util_file.create_dir(project_name, backup)
        util.show('Backing up to custom directory: %s' % backup_directory)
        # Recreate the process' project-relative location under the backup root.
        process_path = process_inst.get_path()
        common_path = util_file.remove_common_path_simple(project, process_path)
        backup_directory = util_file.create_dir(util_file.join_path(backup_directory, common_path))
    if not backup_directory:
        return
    return backup_directory
def backup_process(process_path=None, comment='Backup', backup_directory=None):
    """
    Back up the process at the given path to the process/.backup folder,
    or to backup_directory when one is supplied (or configured).

    Args:
        process_path (str): Directory of the process to back up.
        comment (str): Comment stored with the backup version.
        backup_directory (str): Optional explicit backup destination.
    """
    log.debug('Backup process at path: %s' % process_path)
    log.debug('Backup to custom path: %s' % backup_directory)
    if not backup_directory:
        backup_directory = get_custom_backup_directory(process_path)
    log.debug('Final backup path: %s' % backup_directory)
    inst = Process()
    inst.set_directory(process_path)
    inst.backup(comment, backup_directory)
def get_process_builtins(process):
    """
    Build the dictionary of names injected into process code when it runs:
    the process instance itself plus the shared vetala code builtins.

    Args:
        process: Process instance exposed to the code as 'process'.

    Returns:
        dict: Name-to-object mapping for exec'd process code.
    """
    result = {'process': process}
    result.update(util.get_code_builtins())
    return result
def reset_process_builtins(process, custom_builtins=None):
    """
    Remove the process builtins (plus any custom ones) from the code environment.

    Args:
        process: Process instance whose builtins were installed.
        custom_builtins (dict): Extra names that were installed alongside the
            standard process builtins.
    """
    # One falsy check covers both None and an empty dict (the previous code
    # performed the same replacement twice).
    if not custom_builtins:
        custom_builtins = {}
    builtins = get_process_builtins(process)
    custom_builtins.update(builtins)
    # Consistency fix: pass the merged dict, mirroring setup_process_builtins;
    # previously the custom entries were merged but never passed on.
    util.reset_code_builtins(custom_builtins)
def setup_process_builtins(process, custom_builtins=None):
    """
    Install the process builtins (plus any custom ones) into the code environment.

    Args:
        process: Process instance to expose as 'process'.
        custom_builtins (dict): Extra names to install alongside the standard
            process builtins.
    """
    # One falsy check covers both None and an empty dict (the previous code
    # performed the same replacement twice).
    if not custom_builtins:
        custom_builtins = {}
    builtins = get_process_builtins(process)
    custom_builtins.update(builtins)
    util.setup_code_builtins(custom_builtins)
def run_deadline(process_directory, name, parent_jobs=None, batch_name=None):
    """
    Submit a single process build to Deadline as a Maya job.

    Exports a placeholder scene (deadline.ma) into the process data folder,
    then submits a job that builds the process in that scene.

    Args:
        process_directory (str): Directory of the process to build.
        name (str): Display name used in the job description.
        parent_jobs (list): Deadline job ids this job must wait on.
        batch_name (str): Batch grouping name shown in the Deadline monitor.

    Returns:
        The submitted job id, or None when no deadline command is configured.
    """
    if parent_jobs is None:
        parent_jobs = []
    deadline_command = util_file.get_deadline_command_from_settings()
    if not deadline_command:
        return
    process_inst = Process()
    process_inst.set_directory(process_directory)
    data_path = process_inst.get_data_path(in_folder=False)
    # Export a scene containing only a temporary locator as the job's scene file.
    # NOTE(review): requires a running Maya session (cmds) even though this is
    # a module-level function.
    locator = cmds.spaceLocator()
    maya_filename = util_file.join_path(data_path, 'deadline.ma')
    if util_file.is_file_in_dir('deadline.ma', data_path):
        util_file.delete_file('deadline.ma', data_path)
    cmds.file(maya_filename, type='mayaAscii', exportSelected=True)
    cmds.delete(locator)
    settings = util_file.get_vetala_settings_inst()
    pool = settings.get('deadline_pool')
    group = settings.get('deadline_group')
    department = settings.get('deadline_department')
    from ..render_farm import util_deadline
    job = util_deadline.MayaJob()
    if batch_name:
        job.set_job_setting('BatchName', batch_name)
    if parent_jobs:
        job.set_parent_jobs(parent_jobs)
    job.set_current_process(process_directory)
    job.set_task_info(pool, group, 100)
    comment = ''
    job.set_task_description('Vetala Process: %s' % name, department, comment)
    job.set_deadline_path(deadline_command)
    job.set_output_path(data_path)
    job.set_scene_file_path(maya_filename)
    job_id = job.submit()
    return job_id
|
import unreal
from .... import shelf_core
import random
from .. import gallery_class
class GallaryButton(gallery_class.GallaryWidgetFactory):
    """Gallery widget factory entry that produces a demo button whose
    background changes to a random color each time it is clicked."""

    def with_content(self):
        return "Button"

    def on_click(self, button: unreal.Button):
        # Returns a zero-argument callback bound to this specific button.
        def _handler():
            rgba = [random.random(), random.random(), random.random(), 1]
            button.set_background_color(unreal.LinearColor(*rgba))
            print(rgba)
        return _handler

    def create(self):
        plugins_dir = unreal.Paths.convert_relative_path_to_full(unreal.Paths.project_plugins_dir())
        icon_path = unreal.Paths.combine([plugins_dir, "UMGForPython/project/.png"])
        button = shelf_core.create_button("Button", icon_path)
        button.on_clicked.add_callable(self.on_click(button))
        return button
|
# /project/
# @CBgameDev Optimisation Script - Log Static Mesh UV Channel Count For LOD 0
# /project/
import unreal
import sys # So we can grab arguments fed into the python script
import os
# Editor libraries used to list assets and query static mesh UV info.
EditAssetLib = unreal.EditorAssetLibrary()
StatMeshLib = unreal.EditorStaticMeshLibrary()
workingPath = "/Game/" # Using the root directory
# Log file is written next to this script.
notepadFilePath = os.path.dirname(__file__) + "//PythonOptimiseLog.txt"
allAssets = EditAssetLib.list_assets(workingPath, True, False)
selectedAssetsPath = workingPath
LogStringsArray = []  # one formatted log line per mesh over the UV threshold
numOfOptimisations = 0  # count of meshes at/over the threshold
numOfChannelsToCheckFor = int(float(sys.argv[1])) # pull value sent with script
# Walk every asset under the working path inside a cancellable progress dialog,
# recording static meshes whose LOD 0 UV channel count meets the threshold.
with unreal.ScopedSlowTask(len(allAssets), selectedAssetsPath) as ST:
    ST.make_dialog(True)
    for asset in allAssets:
        _assetData = EditAssetLib.find_asset_data(asset)
        _assetName = _assetData.get_asset().get_name()
        _assetPathName = _assetData.get_asset().get_path_name()
        _assetClassName = _assetData.get_asset().get_class().get_name()
        if _assetClassName == "StaticMesh":
            _staticMeshAsset = unreal.StaticMesh.cast(_assetData.get_asset())
            # UV channel count for LOD index 0 only.
            _howManyUV_channels = StatMeshLib.get_num_uv_channels(_staticMeshAsset, 0)
            if _howManyUV_channels >= numOfChannelsToCheckFor:
                LogStringsArray.append(" [%s] - %s ------------> At Path: %s \n" % (_howManyUV_channels, _assetName, _assetPathName))
                # unreal.log("Asset Name: [%s] %s Path: %s \n" % (_howManyUV_channels, _assetName, _assetPathName))
                numOfOptimisations += 1
        if ST.should_cancel():
            break
        ST.enter_progress_frame(1, asset)
# Write results into a log file
TitleOfOptimisation = "Log Static Mesh UV Channel Count For LOD 0"
DescOfOptimisation = "Searches the entire project for static mesh assets that have a UV channel count above the value that you have set"
SummaryMessageIntro = "-- Static Mesh Assets With UV Channels Numbering >= %s --" % numOfChannelsToCheckFor
if unreal.Paths.file_exists(notepadFilePath): # Check if txt file already exists
    os.remove(notepadFilePath) # if does remove it
# Use a context manager so the log is guaranteed closed (and flushed) before it
# is opened in notepad below; also stops shadowing the builtin name 'file'.
with open(notepadFilePath, "a+") as log_file:
    log_file.write("OPTIMISING SCRIPT by @CBgameDev \n")
    log_file.write("==================================================================================================== \n")
    log_file.write(" SCRIPT NAME: %s \n" % TitleOfOptimisation)
    log_file.write(" DESCRIPTION: %s \n" % DescOfOptimisation)
    log_file.write("==================================================================================================== \n \n")
    if numOfOptimisations <= 0:
        log_file.write(" -- NONE FOUND -- \n \n")
    else:
        # Iterate the entries directly instead of indexing with range(len(...)).
        for log_line in LogStringsArray:
            log_file.write(log_line)
    # Run summary text
    log_file.write("\n")
    log_file.write("======================================================================================================= \n")
    log_file.write(" SUMMARY: \n")
    log_file.write(" %s \n" % SummaryMessageIntro)
    log_file.write(" Found: %s \n \n" % numOfOptimisations)
    log_file.write("======================================================================================================= \n")
    log_file.write(" Logged to %s \n" % notepadFilePath)
    log_file.write("======================================================================================================= \n")
os.startfile(notepadFilePath) # Trigger the notepad file to open
|
import unreal
def export_assets(self):
    """Export every asset currently selected in the content browser to an FBX file.

    Each selected asset is written to '/project/<asset_name>.fbx' via an
    automated (non-interactive) AssetExportTask using the static-mesh FBX exporter.
    """
    for asset in unreal.EditorUtilityLibrary.get_selected_assets():
        task = unreal.AssetExportTask()
        # Automated + no prompt: run silently without any export dialogs
        task.automated = True
        task.prompt = False
        task.filename = '/project/' + asset.get_name() + '.fbx'
        task.object = asset
        task.options = unreal.FbxExportOption()
        task.exporter = unreal.StaticMeshExporterFBX()
        task.exporter.run_asset_export_task(task)
|
# _
# (_)
# _ __ ___ __ _ _ __ ___ ___ _ __ _ ___ _ __ ___
# | '_ ` _ \ / _` | '_ ` _ \ / _ \| '_ \| |/ _ \ '_ ` _ \
# | | | | | | (_| | | | | | | (_) | | | | | __/ | | | | |
# |_| |_| |_|\__,_|_| |_| |_|\___/|_| |_|_|\___|_| |_| |_|
# www.mamoniem.com
# www.ue4u.xyz
#Copyright 2022 Muhammad A.Moniem (@_mamoniem). All Rights Reserved.
#
import unreal
@unreal.uclass()
class MyEditorLvelLib(unreal.EditorLevelLibrary):
    """Thin uclass wrapper so EditorLevelLibrary can be instantiated from Python.

    NOTE: the 'Lvel' typo is kept — the name is referenced by the script below.
    """
    pass
allLevelActors = MyEditorLvelLib().get_all_level_actors()
for actor in allLevelActors:
    # Only rig a camera for skeletal-mesh actors (exact class match, not subclasses)
    if actor.get_class() != unreal.SkeletalMeshActor.static_class():
        continue
    unreal.log("Adding camera to actor: [%s]" % (actor.get_name()))
    # Offset the spawn point to the character's right and up to head height
    spawn_location = actor.get_actor_location() + (actor.get_actor_right_vector() * 200)
    spawn_location.z += 150
    camera = unreal.EditorLevelLibrary.spawn_actor_from_class(
        unreal.CineCameraActor, spawn_location, actor.get_actor_rotation()
    )
    # Fixed manual focus for the spawned cine camera
    focus = unreal.CameraFocusSettings()
    focus.manual_focus_distance = 1320.0
    focus.focus_method = unreal.CameraFocusMethod.MANUAL
    focus.focus_offset = 19.0
    focus.smooth_focus_changes = False
    # Track the character, aiming at a point 150 units above its origin
    tracking = unreal.CameraLookatTrackingSettings()
    tracking.actor_to_track = actor
    tracking.allow_roll = True
    tracking.enable_look_at_tracking = True
    tracking.relative_offset = unreal.Vector(0.0, 0.0, 150.0)
    camera.get_cine_camera_component().set_editor_property("focus_settings", focus)
    camera.lookat_tracking_settings = tracking
    unreal.log("****************************************************")
|
# -*- coding: utf-8 -*-
"""
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
__author__ = 'timmyliang'
__email__ = '[email protected]'
__date__ = '2021-08-10 16:30:14'
# import unreal
# asset_tool = unreal.AssetToolsHelpers.get_asset_tools()
# asset_registry = unreal.AssetRegistryHelpers.get_asset_registry()
# path = '/project/.90065_A'
# data = asset_registry.get_asset_by_object_path(path)
# print(data.is_u_asset())
# print(data.is_valid())
import unreal

asset_reg = unreal.AssetRegistryHelpers.get_asset_registry()
path ='/project/'
# NOTE: list every PhysicsAsset found (recursively) under this content path
kwargs = {"class_names": ["PhysicsAsset"], "package_paths": [path],'recursive_paths':True}
for data in asset_reg.get_assets(unreal.ARFilter(**kwargs)):
    print(data.object_path)
|
# -*- coding: utf-8 -*-
"""Extract camera from Unreal."""
import os
import unreal
from ayon_core.pipeline import publish
from ayon_unreal.api.pipeline import (
UNREAL_VERSION,
select_camera,
get_tracks
)
class ExtractCamera(publish.Extractor):
    """Extract a camera.

    Exports the camera of a published level-sequence instance to an FBX file
    in the staging directory, picking the correct SequencerTools API for the
    running Unreal version (5.4+, 5.x/4.27, or 4.26).
    """

    label = "Extract Camera"
    hosts = ["unreal"]
    families = ["camera"]
    optional = True

    def process(self, instance):
        ar = unreal.AssetRegistryHelpers.get_asset_registry()

        # Define extract output file path
        staging_dir = self.staging_dir(instance)
        fbx_filename = "{}.fbx".format(instance.name)

        # Perform extraction
        self.log.info("Performing extraction..")

        # Check if the loaded level is the same of the instance
        if UNREAL_VERSION.major == 5:
            world = unreal.UnrealEditorSubsystem().get_editor_world()
        else:
            world = unreal.EditorLevelLibrary.get_editor_world()
        current_level = world.get_path_name()
        assert current_level == instance.data.get("level"), \
            "Wrong level loaded"

        for member in instance.data.get('members'):
            data = ar.get_asset_by_object_path(member)
            # The asset-class field moved to `asset_class_path` in UE5
            if UNREAL_VERSION.major == 5:
                is_level_sequence = (
                    data.asset_class_path.asset_name == "LevelSequence")
            else:
                is_level_sequence = (data.asset_class == "LevelSequence")

            if is_level_sequence:
                sequence = data.get_asset()
                # select_camera scopes the export to the sequence's camera
                with select_camera(sequence):
                    if UNREAL_VERSION.major == 5:
                        params = None
                        if UNREAL_VERSION.minor >= 4:
                            # 5.4+ dropped the `master_tracks` parameter
                            params = unreal.SequencerExportFBXParams(
                                world=world,
                                root_sequence=sequence,
                                sequence=sequence,
                                bindings=sequence.get_bindings(),
                                fbx_file_name=os.path.join(staging_dir, fbx_filename)
                            )
                        else:
                            params = unreal.SequencerExportFBXParams(
                                world=world,
                                root_sequence=sequence,
                                sequence=sequence,
                                bindings=sequence.get_bindings(),
                                master_tracks=get_tracks(sequence),
                                fbx_file_name=os.path.join(staging_dir, fbx_filename)
                            )
                        unreal.SequencerTools.export_level_sequence_fbx(params)
                    elif UNREAL_VERSION.major == 4 and UNREAL_VERSION.minor == 26:
                        unreal.SequencerTools.export_fbx(
                            world,
                            sequence,
                            sequence.get_bindings(),
                            unreal.FbxExportOption(),
                            os.path.join(staging_dir, fbx_filename)
                        )
                    else:
                        # Unreal 5.0 or 4.27
                        unreal.SequencerTools.export_level_sequence_fbx(
                            world,
                            sequence,
                            sequence.get_bindings(),
                            unreal.FbxExportOption(),
                            os.path.join(staging_dir, fbx_filename)
                        )

        if not os.path.isfile(os.path.join(staging_dir, fbx_filename)):
            raise RuntimeError("Failed to extract camera")

        if "representations" not in instance.data:
            instance.data["representations"] = []

        fbx_representation = {
            'name': 'fbx',
            'ext': 'fbx',
            'files': fbx_filename,
            'clipIn': instance.data["clipIn"],
            'clipOut': instance.data["clipOut"],
            "stagingDir": staging_dir,
        }
        instance.data["representations"].append(fbx_representation)
|
# Copyright Epic Games, Inc. All Rights Reserved
import unreal
from getpass import getuser
# Get a render queue
pipeline_subsystem = unreal.get_editor_subsystem(
    unreal.MoviePipelineQueueSubsystem
)
# Get the project settings
project_settings = unreal.get_default_object(
    unreal.MovieRenderPipelineProjectSettings
)
# Get the pipeline queue
movie_pipeline_queue = pipeline_subsystem.get_queue()
# Module-level handle to the most recently created executor instance;
# populated by get_executor_instance().
pipeline_executor = None
def get_executor_instance(is_remote):
    """
    Method to return an instance of a render executor

    :param bool is_remote: Flag to use the local or remote executor class
    :return: Executor instance
    """
    # Pick the executor setting up-front instead of always resolving the local
    # one first. Previously, when the local setting raised TypeError (legacy
    # class-object value), `is_soft_class_object` was left False even if the
    # remote conversion then succeeded — so a valid remote soft-class ref was
    # never loaded with load_class_asset_blocking.
    executor_setting = (
        project_settings.default_remote_executor
        if is_remote
        else project_settings.default_local_executor
    )

    is_soft_class_object = True
    try:
        # Convert the SoftClassPath into a SoftClassReference.
        class_ref = unreal.SystemLibrary.conv_soft_class_path_to_soft_class_ref(
            executor_setting
        )
    except TypeError:
        # For backwards compatibility. Older versions returned a class object
        # from the project settings directly.
        class_ref = executor_setting
        is_soft_class_object = False

    if not class_ref:
        raise RuntimeError(
            "Failed to get a class reference to the default executor from the "
            "project settings. Check the logs for more details."
        )

    if is_soft_class_object:
        # Get the executor class as this is required to get an instance of
        # the executor
        executor_class = unreal.SystemLibrary.load_class_asset_blocking(
            class_ref
        )
    else:
        executor_class = class_ref

    global pipeline_executor
    pipeline_executor = unreal.new_object(executor_class)

    return pipeline_executor
def execute_render(is_remote=False, executor_instance=None, is_cmdline=False):
    """
    Starts a render

    :param bool is_remote: Flag to use the local or remote executor class
    :param executor_instance: Executor instance used for rendering
    :param bool is_cmdline: Flag to determine if the render was executed from a commandline.
    """
    # Fall back to a freshly created executor when none was supplied
    executor = executor_instance or get_executor_instance(is_remote)

    # Shutting the editor down afterwards only makes sense for CLI renders
    if is_cmdline:
        setup_editor_exit_callback(executor)

    # Start the Render
    unreal.log("MRQ job started...")
    unreal.log(f"Is remote render: {is_remote}")
    pipeline_subsystem.render_queue_with_executor_instance(executor)

    return executor
def setup_editor_exit_callback(executor_instance):
    """
    Setup callbacks for when you need to close the editor after a render

    :param executor_instance: Movie Pipeline executor instance
    """
    unreal.log("Executed job from commandline, setting up shutdown callback..")

    # Pair each executor delegate with the callable that should fire:
    # finished -> quit the editor; errored -> log the failure, then quit.
    delegate_bindings = (
        (executor_instance.on_executor_finished_delegate, shutdown_editor),
        (executor_instance.on_executor_errored_delegate, executor_failed_callback),
    )
    for delegate, callback in delegate_bindings:
        delegate.add_callable(callback)
def shutdown_editor(movie_pipeline=None, results=None):
    """Quit the editor once rendering has finished.

    Signature matches the on_executor_finished delegate; both arguments are
    ignored.
    """
    unreal.log("Rendering is complete! Exiting...")
    unreal.SystemLibrary.quit_editor()
def executor_failed_callback(executor, pipeline, is_fatal, error):
    """Log the render failure reason, then shut the editor down.

    Signature matches the on_executor_errored delegate.
    """
    unreal.log_error(
        f"An error occurred while executing a render.\n\tError: {error}"
    )
    unreal.SystemLibrary.quit_editor()
def get_asset_data(name_or_path, asset_class):
    """
    Get the asset data for the asset name or path based on its class.

    :param str name_or_path: asset name or package name
    :param str asset_class: Asset class filter to use when looking for assets in registry
    :raises RuntimeError: if no asset of the given class matches
    :return: Asset package if it exists
    """
    # Get all the specified class assets in the project.
    # This is the only mechanism we can think of at the moment to allow
    # shorter path names in the commandline interface. This will allow users
    # to only provide the asset name or the package path in the commandline
    # interface based on the assumption that all assets are unique
    asset_registry = unreal.AssetRegistryHelpers.get_asset_registry()

    # If the asset registry is still loading, wait for it to finish
    if asset_registry.is_loading_assets():
        unreal.log_warning("Asset Registry is loading, waiting to complete...")
        asset_registry.wait_for_completion()
        unreal.log("Asset Registry load complete!")

    # Names starting with "/" signify a content package (Game content starts
    # with "/Game", plugin content with /<PluginName>). Normalize a package
    # path like "/project/.asset_name" to its package name ONCE, outside the
    # loop — the old code redid the startswith/split on every iteration.
    is_package_path = name_or_path.startswith("/")
    if is_package_path:
        name_or_path = name_or_path.split(".")[0]

    assets = asset_registry.get_assets(
        unreal.ARFilter(class_names=[asset_class])
    )

    # This lookup could potentially be very slow
    for asset in assets:
        if is_package_path:
            if asset.package_name == name_or_path:
                return asset
        elif asset.asset_name == name_or_path:
            return asset

    raise RuntimeError(f"`{name_or_path}` could not be found!")
def setup_remote_render_jobs(batch_name, job_preset, render_jobs):
    """
    This function sets up a render job with the options for a remote render.
    This is configured currently for deadline jobs.

    :param str batch_name: Remote render batch name
    :param str job_preset: Job Preset to use for job details
    :param list render_jobs: The list of render jobs to apply the ars to
    """
    unreal.log("Setting up Remote render executor.. ")

    # A job is mis-configured when it exposes a deadline property (batch_name
    # / job_preset) but the corresponding commandline value was not supplied.
    # This remote cli setup can be used with out-of-process rendering and not
    # just deadline.
    unset_job_properties = [
        job.name
        for job in render_jobs
        for attr, value in (("batch_name", batch_name), ("job_preset", job_preset))
        if hasattr(job, attr) and not value
    ]

    if unset_job_properties:
        raise RuntimeError(
            "These jobs did not have a batch name, preset name or preset "
            "library set. This is a requirement for deadline remote rendering. "
            "{jobs}".format(
                jobs="\n".join(unset_job_properties))
        )

    # Update the settings on every render job
    for render_job in render_jobs:
        render_job.batch_name = batch_name
        render_job.job_preset = get_asset_data(
            job_preset,
            "DeadlineJobPreset"
        ).get_asset()
def set_job_state(job, enable=False):
    """
    This method sets the state on a current job to enabled or disabled

    :param job: MoviePipeline job to enable/disable
    :param bool enable: Flag to determine if a job should be or not
    """
    if enable:
        # Check for an enable attribute on the job and if not move along.
        # Note: `Enabled` was added to MRQ that allows disabling all shots in
        # a job. This also enables backwards compatibility.
        try:
            if not job.enabled:
                job.enabled = True
        except AttributeError:
            # Legacy implementations assume the presence of a job means it's
            # enabled
            pass
        # BUG FIX: an enable request must stop here. Previously control fell
        # through into the disable branch below and immediately disabled the
        # job that was just enabled.
        return

    try:
        if job.enabled:
            job.enabled = False
    except AttributeError:
        # If the attribute is not available, go through and disable all the
        # associated shots. This behaves like a disabled job
        for shot in job.shot_info:
            unreal.log_warning(
                f"Disabling shot `{shot.inner_name}` from current render job `{job.job_name}`"
            )
            shot.enabled = False
def update_render_output(job, output_dir=None, output_filename=None):
    """
    Updates that output directory and filename on a render job

    :param job: MRQ job
    :param str output_dir: Output directory for renders
    :param str output_filename: Output filename
    """
    # Get the job output settings
    output_setting = job.get_configuration().find_setting_by_class(
        unreal.MoviePipelineOutputSetting
    )
    if output_dir:
        new_output_dir = unreal.DirectoryPath()
        new_output_dir.set_editor_property(
            "path",
            output_dir
        )
        unreal.log_warning(
            f"Overriding output directory! New output directory is `{output_dir}`."
        )
        output_setting.output_directory = new_output_dir

    if output_filename:
        # BUG FIX: this message was missing the `f` prefix and logged the
        # literal placeholder "{output_filename}" instead of the value.
        unreal.log_warning(
            f"Overriding filename format! New format is `{output_filename}`."
        )
        output_setting.file_name_format = output_filename
def update_queue(
    jobs=None,
    shots=None,
    all_shots=False,
    user=None,
):
    """
    This function configures and renders a job based on the arguments

    :param list jobs: MRQ jobs to render
    :param list shots: Shots to render from jobs
    :param bool all_shots: Flag for rendering all shots
    :param str user: Render user
    """
    # Iterate over all the jobs and make sure the jobs we want to
    # render are enabled.
    # All jobs that are not going to be rendered will be disabled if the
    # job enabled attribute is not set or their shots disabled.
    # The expectation is, If a job name is specified, we want to render the
    # current state of that job.
    # If a shot list is specified, we want to only render that shot alongside
    # any other whole jobs (job states) that are explicitly specified,
    # else other jobs or shots that are not
    # needed are disabled
    for job in movie_pipeline_queue.get_jobs():
        enable_job = False

        # Get a list of jobs to enable.
        # This will enable jobs in their current queue state awaiting other
        # modifications if shots are provided, if only the job name is
        # specified, the job will be rendered in its current state
        if jobs and (job.job_name in jobs):
            enable_job = True

        # If we are told to render all shots. Enable all shots for all jobs
        if all_shots:
            for shot in job.shot_info:
                shot.enabled = True

            # set the user for the current job
            job.author = user or getuser()

            # Set the job to enabled and move on to the next job
            set_job_state(job, enable=True)
            continue

        # If we have a list of shots, go through the shots associated
        # with this job, enable the shots that need to be rendered and
        # disable the others
        if shots and (not enable_job):
            for shot in job.shot_info:
                # A shot matches by its inner (camera-cut) or outer name
                if shot.inner_name in shots or (shot.outer_name in shots):
                    shot.enabled = True
                    enable_job = True
                else:
                    unreal.log_warning(
                        f"Disabling shot `{shot.inner_name}` from current render job `{job.job_name}`"
                    )
                    shot.enabled = False

        if enable_job:
            job.author = user or getuser()

        # Set the state of the job by enabling or disabling it.
        set_job_state(job, enable=enable_job)
|
# This script exports each body's relative transform and base color (per component in the current scene) to JSON files
import unreal
import json
from export_part_transform_relative_componet import get_actor_base_color
# Recursively collect all StaticMeshActors among an actor's children
def get_all_child_static_meshes(actor):
    """Depth-first collect every StaticMeshActor attached under `actor`.

    Logs each collected actor's label and returns them in visit order.
    The root `actor` itself is never included.
    """
    collected = []

    def _visit(candidate):
        # Record the actor itself, then descend into its attachments
        if isinstance(candidate, unreal.StaticMeshActor):
            collected.append(candidate)
        for attached in candidate.get_attached_actors():
            _visit(attached)

    # Start the recursion from the original actor's direct children
    for child in actor.get_attached_actors():
        _visit(child)

    for mesh_actor in collected:
        unreal.log(mesh_actor.get_actor_label())
    return collected
def get_selected_child_staticmesh(root_actor : unreal.Actor):
    """Return all StaticMeshActors below `root_actor`, or [] when it is missing."""
    if root_actor:
        return get_all_child_static_meshes(root_actor)
    unreal.log_warning("No DatasmithSceneActor Found!")
    return []
def get_relative_trans_with_color(saved_json_folder : str):
    """Export each StaticMeshActor's transform relative to its parent component,
    plus its base color, as one JSON file per mesh actor in `saved_json_folder`.

    Expects the level to contain a DatasmithSceneActor whose grandchildren are
    the "component" actors (DatasmithSceneActor > Actor > Actor[component]).
    """
    actors = unreal.get_editor_subsystem(unreal.EditorActorSubsystem).get_all_level_actors()
    ddm_datasmith = unreal.EditorFilterLibrary.by_class(actors, unreal.DatasmithSceneActor.static_class())[0]
    component_actors = ddm_datasmith.get_attached_actors()[0].get_attached_actors() # DatasmithSceneActor > Actor > Actor[This]
    part_count = 0
    transform_data = []
    for component_actor in component_actors:
        part_count += 1
        component_actor_transform = component_actor.get_actor_transform()
        static_mesh_actors = get_selected_child_staticmesh(component_actor)
        unreal.log_warning(f"Component Actor: {component_actor.get_actor_label()} has {len(static_mesh_actors)} StaticMeshActor")
        for static_mesh_actor in static_mesh_actors:
            static_mesh_actor_transform = static_mesh_actor.get_actor_transform()
            # Relative transform = parent_inverse * child (component-local space)
            relative_transform = component_actor_transform.inverse() * static_mesh_actor_transform
            relative_rotator = relative_transform.rotation.rotator()
            color = get_actor_base_color(static_mesh_actor)
            transform_info = {
                "name": static_mesh_actor.get_actor_label(),
                "location": {
                    "x": relative_transform.translation.x,
                    "y": relative_transform.translation.y,
                    "z": relative_transform.translation.z
                },
                "rotation": {
                    "roll": relative_rotator.roll,
                    "pitch": relative_rotator.pitch,
                    "yaw": relative_rotator.yaw
                },
                "color": {
                    "r": color.r,
                    "g": color.g,
                    "b": color.b,
                    "a": color.a
                }
            }
            transform_data.append(transform_info)
            # One JSON file per mesh actor, named after its label
            file_path = f"{saved_json_folder}{static_mesh_actor.get_actor_label()}.json"
            with open(file_path, 'w') as json_file:
                json.dump(transform_info, json_file, indent=4)
            unreal.log(f"Transform {len(transform_data)} data exported to {file_path}")
        # Clear the per-component lists before processing the next component
        transform_data = []
        static_mesh_actors = []
    unreal.log(f"Exported {part_count} Json to {saved_json_folder}")
|
# -*- coding: utf-8 -*-
"""
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
__author__ = 'timmyliang'
__email__ = '[email protected]'
__date__ = '2020-08-28 08:52:34'
import os
import unreal
# Shared SystemLibrary instance used by the path helpers below
sys_lib = unreal.SystemLibrary()
def list_menu(num=1000):
    """Probe the ToolMenus registry for live menu objects and return their names.

    :param int num: how many ToolMenu_<i> object slots to probe
    :return: list of unique menu names found
    """
    found = set()
    for index in range(num):
        menu = unreal.find_object(None,"/project/.ToolMenus_0:ToolMenu_%s" % index)
        if menu:
            name = str(menu.menu_name)
            # Unreal reports unset names as the literal string "None"
            if name != "None":
                found.add(name)
    return list(found)
def unreal_progress(tasks, label=u"่ฟๅบฆ", total=None):
    """Generator yielding (index, item) while driving a cancellable slow-task dialog.

    :param tasks: iterable of work items (must support len() unless total given)
    :param label: dialog label prefix
    :param total: explicit total count; falls back to len(tasks)
    """
    count = total if total else len(tasks)
    with unreal.ScopedSlowTask(count, label) as slow_task:
        slow_task.make_dialog(True)
        for index, item in enumerate(tasks):
            # Stop yielding as soon as the user hits cancel
            if slow_task.should_cancel():
                break
            slow_task.enter_progress_frame(1, "%s %s/%s" % (label, index, count))
            yield index, item
def convert_to_filename(path):
    """Map a /Game content path to an absolute on-disk path.

    Directories are returned as-is; anything else gets a .uasset extension.
    """
    content_dir = sys_lib.get_project_content_directory()
    # Swap the virtual /Game/ root for the real content directory and normalize slashes
    full_path = os.path.abspath(path.replace("/Game/", content_dir)).replace("\\", "/")
    if os.path.isdir(full_path):
        return full_path
    return "%s.uasset" % os.path.splitext(full_path)[0]
|
"""
This script describes how to import a USD Stage into actors and assets, also optionally specifying specific prim
paths to import.
The `prims_to_import` property can be left at its default value of ["/"] (a list with just the "/" prim path in it)
to import the entire stage.
The provided paths in `prims_to_import` are used as a USD population mask when opening the stage, and as such are
subject to the rules described here: https://graphics.pixar.com/project/.html
As an example, consider the following USD stage:
#usda 1.0
def Xform "ParentA"
{
def Xform "ChildA"
{
}
def Xform "ChildB"
{
}
}
def Xform "ParentB"
{
def Xform "ChildC"
{
}
def Xform "ChildD"
{
}
}
In general, the main thing to keep in mind is that if "/ParentA" is within `prims_to_import`, ParentA and *all* of its
children will be imported. As a consequence, having both "/ParentA" and "/project/" on the list is redundant, as
"/ParentA" will already lead to "/project/" being imported, as previously mentioned.
"""
import unreal

ROOT_LAYER_FILENAME = r"/project/.usda"
DESTINATION_CONTENT_PATH = r"/project/"

# Configure what the USD importer should bring in
options = unreal.UsdStageImportOptions()
options.import_actors = True
options.import_geometry = True
options.import_skeletal_animations = True
options.import_level_sequences = True
options.import_materials = True
options.prim_path_folder_structure = False
# Population mask: each listed prim is imported with ALL of its children
options.prims_to_import = [
    "/ParentA", # This will import ParentA, ChildA and ChildB
    "/project/" # This will import ParentB and ChildC only (and *not* import ChildD)
]

# Run the import as an automated (non-interactive) task
task = unreal.AssetImportTask()
task.set_editor_property('filename', ROOT_LAYER_FILENAME)
task.set_editor_property('destination_path', DESTINATION_CONTENT_PATH)
task.set_editor_property('automated', True)
task.set_editor_property('options', options)
task.set_editor_property('replace_existing', True)
asset_tools = unreal.AssetToolsHelpers.get_asset_tools()
asset_tools.import_asset_tasks([task])

# Import succeeded if at least one asset path was produced
asset_paths = task.get_editor_property("imported_object_paths")
success = asset_paths and len(asset_paths) > 0
|
import unreal
def open_base_materials_from_selection():
    """Open the parent (base) material editor for every selected Material Instance."""
    asset_tools = unreal.AssetToolsHelpers.get_asset_tools()
    parents = []
    for candidate in unreal.EditorUtilityLibrary.get_selected_assets():
        # Only Material Instances have a base material to resolve
        if not isinstance(candidate, unreal.MaterialInstance):
            continue
        parent = candidate.get_base_material()
        # De-duplicate while preserving first-seen order
        if parent and parent not in parents:
            parents.append(parent)

    if parents:
        asset_tools.open_editor_for_assets(parents)
    else:
        unreal.log("No valid Material Instances selected.")

open_base_materials_from_selection()
|
"""
This script describes how to open and import USD Stages in ways that automatically configure the generated StaticMeshes
for Nanite.
In summary, there are two different settings you can use: The NaniteTriangleThreshold property (any static mesh
generated with this many triangles or more will be enabled for Nanite), and the 'unrealNanite' property (force Nanite
enabled or disabled on a per-prim basis).
Note that there are two cases in which Nanite will not be enabled for generated StaticMeshes, even when using the
above settings:
- When the Mesh prim is configured for multiple LODs (by using Variants and Variant Sets);
- When the generated StaticMesh would have led to more than 64 material slots.
Additionally, when exporting existing UStaticMeshes to USD Mesh prims we will automatically emit the
`uniform token unrealNanite = "enable"` attribute whenever the source UStaticMesh has "Enable Nanite Support" checked.
The text below describes a "sample_scene.usda" file (some attributes omitted for brevity):
# Assuming a triangle threshold of 2000, this prim will generate a StaticMesh with Nanite disabled:
def Mesh "small_mesh"
{
# Mesh data with 1000 triangles
}
# Assuming a triangle threshold of 2000, this prim will generate a StaticMesh with Nanite enabled:
def Mesh "large_mesh"
{
# Mesh data with 5000 triangles
}
# Assuming a triangle threshold of 2000, this prim will generate a StaticMesh with Nanite enabled:
def Mesh "small_but_enabled"
{
# Mesh data with 1000 triangles
uniform token unrealNanite = "enable"
}
# Assuming a triangle threshold of 2000, this prim will generate a StaticMesh with Nanite disabled:
def Mesh "large_but_disabled"
{
# Mesh data with 5000 triangles
uniform token unrealNanite = "disable"
}
# Assuming a triangle threshold of 2000 and that we're collapsing prims with "component" kind,
# this prim hierarchy will lead to a StaticMesh with Nanite enabled, as the final StaticMesh will end up with 2000
# total triangles.
def Xform "nanite_collapsed" ( kind = "component" )
{
def Mesh "small_mesh_1"
{
# Mesh data with 1000 triangles
}
def Mesh "small_mesh_2"
{
# Mesh data with 1000 triangles
}
}
# Assuming a triangle threshold of 2000 and that we're collapsing prims with "component" kind,
# this prim hierarchy will lead to a StaticMesh with Nanite enabled. The combined triangle count is below the
# threshold, however we have an 'unrealNanite' opinion on the root of the collapsed hierarchy that overrides it.
# Note that in case of collapsing, we will only consider the 'unrealNanite' attribute on the root, and will
# disregard opinions for it specified on each individual Mesh prim.
def Xform "nanite_collapsed" ( kind = "component" )
{
uniform token unrealNanite = "enable"
def Mesh "small_mesh_1"
{
# Mesh data with 500 triangles
}
def Mesh "small_mesh_2"
{
# Mesh data with 500 triangles
}
}
"""
import unreal
ROOT_LAYER_FILENAME = r"/project/.usda"
DESTINATION_CONTENT_PATH = r"/project/"
def specify_nanite_when_importing():
    """ Describes how to specify the Nanite triangle threshold when importing a stage """
    options = unreal.UsdStageImportOptions()
    options.import_actors = True
    options.import_geometry = True
    options.import_skeletal_animations = True
    options.nanite_triangle_threshold = 2000  # Use your threshold here

    # Configure an automated import task with the options above
    import_task = unreal.AssetImportTask()
    for prop, value in (
        ('filename', ROOT_LAYER_FILENAME),
        ('destination_path', DESTINATION_CONTENT_PATH),
        ('automated', True),
        ('options', options),
        ('replace_existing', True),
    ):
        import_task.set_editor_property(prop, value)

    unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([import_task])

    # Import succeeded if at least one asset path was produced
    asset_paths = import_task.get_editor_property("imported_object_paths")
    success = asset_paths and len(asset_paths) > 0
def specify_nanite_when_opening():
    """ Describes how to specify the Nanite triangle threshold when opening a stage """
    actor_subsystem = unreal.get_editor_subsystem(unreal.EditorActorSubsystem)
    stage_actor = actor_subsystem.spawn_actor_from_class(unreal.UsdStageActor, unreal.Vector())

    # Either one of the two lines below should work, without any difference
    stage_actor.set_editor_property('nanite_triangle_threshold', 2000)
    stage_actor.set_nanite_triangle_threshold(2000)

    # Assigning the root layer opens the stage with the threshold already applied
    stage_actor.set_editor_property('root_layer', unreal.FilePath(ROOT_LAYER_FILENAME))
|
# Copyright Epic Games, Inc. All Rights Reserved
import os
from abc import abstractmethod
import traceback
from deadline_rpc.client import RPCClient
import unreal
import __main__
class _RPCContextManager:
"""
Context manager used for automatically marking a task as complete after
the statement is done executing
"""
def __init__(self, proxy, task_id):
"""
Constructor
"""
# RPC Client proxy
self._proxy = proxy
# Current task id
self._current_task_id = task_id
def __enter__(self):
return self._proxy
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Called when the context manager exits
"""
# Tell the server the task is complete
self._proxy.complete_task(self._current_task_id)
class BaseRPC:
"""
Base class for communicating with a Deadline RPC server. It is
recommended this class is subclassed for any script that need to
communicate with deadline. The class automatically handles connecting and
marking tasks as complete when some abstract methods are implemented
"""
def __init__(self, port=None, ignore_rpc=False, verbose=False):
"""
This allows you to get an instance of the class without expecting
an automatic connection to a rpc server. This will allow you to have
a class that can both be executed in a deadline commandline interface or
as a class instance.
:param port: Optional port to connect to
:param ignore_rpc: Flag to short circuit connecting to a rpc server
"""
self._ignore_rpc = ignore_rpc
self._proxy = None
if not self._ignore_rpc:
if not port:
try:
port = os.environ["DEADLINE_RPC_PORT"]
except KeyError:
raise RuntimeError(
"There was no port specified for the rpc server"
)
self._port = int(port)
# Make a connection to the RPC server
self._proxy = self.__establish_connection()
self.current_task_id = -1 # Setting this to -1 allows us to
# render the first task. i.e task 0
self._get_next_task = True
self._tick_handle = None
self._verbose_logging = verbose
# Set up a property to notify the class when a task is complete
self.__create_on_task_complete_global()
self.task_complete = False
self._sent_task_status = False
# Start getting tasks to process
self._execute()
@staticmethod
def __create_on_task_complete_global():
"""
Creates a property in the globals that allows fire and forget tasks
to notify the class when a task is complete and allowing it to get
the next task
:return:
"""
if not hasattr(__main__, "__notify_task_complete__"):
__main__.__notify_task_complete__ = False
return __main__.__notify_task_complete__
def __establish_connection(self):
"""
Makes a connection to the Deadline RPC server
"""
print(f"Connecting to rpc server on port `{self._port}`")
try:
_client = RPCClient(port=int(self._port))
proxy = _client.proxy
proxy.connect()
except Exception:
raise
else:
if not proxy.is_connected():
raise RuntimeError(
"A connection could not be made with the server"
)
print(f"Connection to server established!")
return proxy
def _wait_for_next_task(self, delta_seconds):
"""
Checks to see if there are any new tasks and executes when there is
:param delta_seconds:
:return:
"""
# skip if our task is the same as previous
if self.proxy.get_task_id() == self.current_task_id:
if self._verbose_logging:
print("Waiting on next task..")
return
print("New task received!")
# Make sure we are explicitly told the task is complete by clearing
# the globals when we get a new task
__main__.__notify_task_complete__ = False
self.task_complete = False
# Unregister the tick handle and execute the task
unreal.unregister_slate_post_tick_callback(self._tick_handle)
self._tick_handle = None
# Set the current task and execute
self.current_task_id = self.proxy.get_task_id()
self._get_next_task = False
print(f"Executing task `{self.current_task_id}`")
self.proxy.set_status_message("Executing task command")
# Execute the next task
# Make sure we fail the job if we encounter any exceptions and
# provide the traceback to the proxy server
try:
self.execute()
except Exception:
trace = traceback.format_exc()
print(trace)
self.proxy.fail_render(trace)
raise
# Start a non-blocking loop that waits till its notified a task is
# complete
self._tick_handle = unreal.register_slate_post_tick_callback(
self._wait_on_task_complete
)
def _wait_on_task_complete(self, delta_seconds):
"""
Waits till a task is mark as completed
:param delta_seconds:
:return:
"""
if self._verbose_logging:
print("Waiting on task to complete..")
if not self._sent_task_status:
self.proxy.set_status_message("Waiting on task completion..")
self._sent_task_status = True
if __main__.__notify_task_complete__ or self.task_complete:
# Exiting the waiting loop
unreal.unregister_slate_post_tick_callback(self._tick_handle)
self._tick_handle = None
print("Task marked complete. Getting next Task")
self.proxy.set_status_message("Task complete!")
# Reset the task status notification
self._sent_task_status = False
# Automatically marks a task complete when the execute function
# exits
with _RPCContextManager(self.proxy, self.current_task_id):
self._get_next_task = True
# This will allow us to keep getting tasks till the process is
# closed
self._execute()
def _execute(self):
"""
Start the execution process
"""
if self._get_next_task and not self._ignore_rpc:
# register a callback with the editor that will check and execute
# the task on editor tick
self._tick_handle = unreal.register_slate_post_tick_callback(
self._wait_for_next_task
)
@property
def proxy(self):
"""
Returns an instance of the Client proxy
:return:
"""
if not self._proxy:
raise RuntimeError("There is no connected proxy!")
return self._proxy
@property
def is_connected(self):
"""
Property that returns if a connection was made with the server
:return:
"""
return self.proxy.is_connected()
    @abstractmethod
    def execute(self):
        """
        Abstract method that is executed to perform a task job/command.
        This method must be implemented when communicating with a Deadline
        RPC server.

        :return: None
        """
        pass
|
# Copyright Epic Games, Inc. All Rights Reserved
# Built-in
import argparse
import re
from pathlib import Path
from getpass import getuser
from collections import OrderedDict
# Internal
from deadline_service import get_global_deadline_service_instance
from deadline_job import DeadlineJob
from deadline_menus import DeadlineToolBarMenu
from deadline_utils import get_deadline_info_from_preset
# Third Party
import unreal
# Editor Utility Widget path
# NOTE: This is very fragile and can break if naming or pathing changes
EDITOR_UTILITY_WIDGET = "/project/"
def _launch_queue_asset_submitter():
    """
    Toolbar callback that opens the queue-asset submitter widget.
    """
    unreal.log("Launching queue submitter.")
    # Load the editor utility widget asset and hand it to the editor
    # utility subsystem, which spawns it as a registered editor tab.
    widget_asset = unreal.EditorAssetLibrary.load_asset(EDITOR_UTILITY_WIDGET)
    utility_subsystem = unreal.get_editor_subsystem(unreal.EditorUtilitySubsystem)
    utility_subsystem.spawn_and_register_tab(widget_asset)
def register_menu_action():
    """
    Registers the Deadline toolbar menu entry for the submitter.
    """
    # Bail out early if the widget asset is missing from the registry.
    if not _validate_euw_asset_exists():
        unreal.log_warning(
            f"EUW `{EDITOR_UTILITY_WIDGET}` does not exist in the Asset registry!"
        )
        return
    menu = DeadlineToolBarMenu()
    menu.register_submenu(
        "SubmitMRQAsset",
        _launch_queue_asset_submitter,
        label_name="Submit Movie Render Queue Asset",
        description="Submits a Movie Render Queue asset to Deadline"
    )
def _validate_euw_asset_exists():
    """
    Make sure our reference editor utility widget exists in
    the asset registry.

    :returns: True if the widget asset is present in the registry,
        False otherwise
    """
    asset_registry = unreal.AssetRegistryHelpers.get_asset_registry()
    # Only-on-disk lookup avoids forcing an asset load just to test existence.
    asset_data = asset_registry.get_assets_by_package_name(
        EDITOR_UTILITY_WIDGET,
        include_only_on_disk_assets=True
    )
    # `asset_data` is an array of AssetData; its truthiness tells us
    # whether anything matched. (Was `True if asset_data else False`.)
    return bool(asset_data)
def _execute_submission(args):
    """
    Creates and submits the queue asset as a job to Deadline.

    :param args: Commandline args namespace. Expects `batch_name`,
        `submission_job_preset`, `remote_job_preset` and `queue_asset`.
    """
    unreal.log("Executing job submission")
    job_info, plugin_info = get_deadline_info_from_preset(
        job_preset=unreal.load_asset(args.submission_job_preset)
    )
    # Due to some odd behavior in how Unreal passes strings to argparse,
    # it adds extra quotes to the string, so we strip the quotes out to get
    # a single string representation.
    batch_name = args.batch_name[0].strip('"')
    # Update the Job Batch Name
    job_info["BatchName"] = batch_name
    # Set the name of the job if one is not set
    if not job_info.get("Name"):
        job_info["Name"] = Path(args.queue_asset).stem
    # Set the Author of the job
    if not job_info.get("UserName"):
        job_info["UserName"] = getuser()
    # Arguments to pass to the executable.
    command_args = []
    # Append all of our inherited command line arguments from the editor.
    in_process_executor_settings = unreal.get_default_object(
        unreal.MoviePipelineInProcessExecutorSettings
    )
    inherited_cmds = in_process_executor_settings.inherited_command_line_arguments
    # Sanitize the commandline by removing any execcmds that may
    # have passed through the commandline.
    # We remove the execcmds because, in some cases, users may execute a
    # script that is local to their editor build for some automated
    # workflow but this is not ideal on the farm. We will expect all
    # custom startup commands for rendering to go through the `Start
    # Command` in the MRQ settings.
    # NOTE: the pattern is now a raw string; `\w`/`\W` were previously
    # invalid escape sequences in a normal string (DeprecationWarning).
    inherited_cmds = re.sub(
        r".(?P<cmds>-execcmds=[\w\W]+[\'\"])",
        "",
        inherited_cmds
    )
    command_args.extend(inherited_cmds.split(" "))
    command_args.extend(
        in_process_executor_settings.additional_command_line_arguments.split(
            " "
        )
    )
    # Build out custom queue command that will be used to render the queue on
    # the farm.
    queue_cmds = [
        "py",
        "mrq_cli.py",
        "queue",
        str(args.queue_asset),
        "--remote",
        "--cmdline",
        "--batch_name",
        batch_name,
        "--deadline_job_preset",
        str(args.remote_job_preset)
    ]
    command_args.extend(
        [
            "-nohmd",
            "-windowed",
            "-ResX=1280",
            "-ResY=720",
            '-execcmds="{cmds}"'.format(cmds=" ".join(queue_cmds))
        ]
    )
    # Append the commandline args from the deadline plugin info
    command_args.extend(plugin_info.get("CommandLineArguments", "").split(" "))
    # Sanitize the commandline args
    command_args = [arg for arg in command_args if arg not in [None, "", " "]]
    # Remove all duplicates from the command args, preserving order
    full_cmd_args = " ".join(list(OrderedDict.fromkeys(command_args)))
    # Get the current launched project file
    if unreal.Paths.is_project_file_path_set():
        # Resolve the (possibly relative) project file path to an
        # absolute path. (Previous comment claimed this trims to just
        # "Game.uproject", which was wrong.)
        game_name_or_project_file = (
            unreal.Paths.convert_relative_path_to_full(
                unreal.Paths.get_project_file_path()
            )
        )
    else:
        raise RuntimeError(
            "Failed to get a project name. Please specify a project!"
        )
    if not plugin_info.get("ProjectFile"):
        project_file = plugin_info.get("ProjectFile", game_name_or_project_file)
        plugin_info["ProjectFile"] = project_file
    # Update the plugin info. "CommandLineMode" tells Deadline to not use an
    # interactive process to execute the job but launch it like a shell
    # command and wait for the process to exit. `--cmdline` in our
    # commandline arguments will tell the editor to shut down when the job is
    # complete
    plugin_info.update(
        {
            "CommandLineArguments": full_cmd_args,
            "CommandLineMode": "true"
        }
    )
    # Create a Deadline job from the selected preset library
    deadline_job = DeadlineJob(job_info, plugin_info)
    deadline_service = get_global_deadline_service_instance()
    # Submit the Deadline Job
    job_id = deadline_service.submit_job(deadline_job)
    unreal.log(f"Deadline job submitted. JobId: {job_id}")
if __name__ == "__main__":
    unreal.log("Executing queue submitter action")
    # Build the commandline interface for the submitter.
    parser = argparse.ArgumentParser(
        description="Submits queue asset to Deadline",
        add_help=False,
    )
    # (flag, extra options) specs for every supported argument.
    argument_specs = [
        ("--batch_name", {"type": str, "nargs": '+', "help": "Deadline Batch Name"}),
        ("--submission_job_preset", {"type": str, "help": "Submitter Deadline Job Preset"}),
        ("--remote_job_preset", {"type": str, "help": "Remote Deadline Job Preset"}),
        ("--queue_asset", {"type": str, "help": "Movie Pipeline Queue Asset"}),
    ]
    for flag, options in argument_specs:
        parser.add_argument(flag, **options)
    parser.set_defaults(func=_execute_submission)
    # Parse the arguments and execute the function callback
    arguments = parser.parse_args()
    arguments.func(arguments)
|
import unreal
# Link lighting-related actors into the LIT sub-sequence.
def link_light_actors_to_sequence(LIT_sub_sequence_path):
    """
    Bind lighting-related actors in the current level to a LIT sub-sequence.

    Level actors matching the light-related classes are added as
    possessables; for any class with no actor present in the level, the
    class itself is added as a spawnable instead.

    :param LIT_sub_sequence_path: asset path of the LIT LevelSequence
    """
    # Get the EditorActorSubsystem instance.
    editor_actor_subsystem = unreal.get_editor_subsystem(unreal.EditorActorSubsystem)
    allActors = editor_actor_subsystem.get_all_level_actors()
    # Find every level actor that matches one of the light-related classes.
    find_lightActor_classList = [unreal.DirectionalLight, unreal.SkyLight, unreal.PostProcessVolume, unreal.ExponentialHeightFog, unreal.SkyAtmosphere]
    possessableActor_classList = []
    for cls in find_lightActor_classList:
        possessableActor_class = unreal.EditorFilterLibrary.by_class(allActors, cls)
        print(f"Filtered Class : {possessableActor_class}")
        possessableActor_classList.extend(possessableActor_class)
    # Classes with no matching level actor get added as spawnables instead.
    spawnableActor_classList = []
    for cls in find_lightActor_classList:
        # If no actor of this class was found in the level, queue the class
        # itself to be added as a spawnable.
        if not any(isinstance(actor, cls) for actor in possessableActor_classList):
            spawnableActor_classList.append(cls)
    # Link the existing level actors into the sequence as possessables.
    sub_sequence = unreal.load_asset(LIT_sub_sequence_path, unreal.LevelSequence)
    for cls in possessableActor_classList:
        # NOTE(review): despite the name, `cls` here is an actor instance
        # (an element of the filtered actor list), not a class.
        sub_sequence.add_possessable(cls)
        print(f"{cls} has been created and linked to the sequence.")
    # Add the remaining classes to the sequence as spawnables.
    for cls in spawnableActor_classList:
        sub_sequence.add_spawnable_from_class(cls)
        print(f"{cls} has been created and linked to the sequence.")
    # Save the changes (left disabled in the original).
    # unreal.EditorAssetLibrary.save_loaded_asset(sub_sequence)
# Convert a string to a boolean value.
def str_to_bool(s):
    """
    Convert a string such as "true"/"false" to a bool.

    Generalized to be case-insensitive and to ignore surrounding
    whitespace, so "True", " FALSE " etc. are also accepted; the original
    "true"/"false" inputs behave exactly as before.

    :param s: string to convert
    :return: True or False
    :raises ValueError: if the string is not a recognized boolean
    """
    normalized = s.strip().lower()
    if normalized == "true":
        return True
    if normalized == "false":
        return False
    raise ValueError(f"Cannot convert {s} to a boolean value")
# Build a unique name for the sub-sequence asset.
def get_unique_asset_name(part, shot_name, project_name, ProjectName_CheckBox, directory):
    """
    Build a unique, versioned asset name for a part's sub-sequence.

    Scans `directory` for assets sharing the base name, derives the next
    version number, and — if the candidate already exists — asks the user
    (Yes/No dialog) whether to bump the version. Returns the chosen name,
    or the sentinel string "break" when the user declines.

    :param part: part tag, e.g. "LIT", "FX" or "ANI"
    :param shot_name: name of the focused shot sequence
    :param project_name: project folder name
    :param ProjectName_CheckBox: "true"/"false" — include the project name
        in the base name
    :param directory: content directory to scan for existing versions
    :return: unique asset name, or "break" if the user cancelled
    """
    # Convert to Unreal Engine's asset path format.
    asset_directory = directory.replace("\\", "/")  # ~/LIT
    if ProjectName_CheckBox == "true":
        base_name = f"{part}_SUB_{project_name}_{shot_name}"  # e.g. LIT_SUB_2024_Enchantress_Shot_02_03
    else:
        base_name = f"{part}_SUB_{shot_name}"  # e.g. LIT_SUB_Shot_02_03
    # Initial version number.
    counter = 2
    asset_name = f"{base_name}_{counter:02d}"
    # Gather every asset path under the directory.
    existing_assets = unreal.EditorAssetLibrary.list_assets(asset_directory)
    # Extract the name from each asset path and compare it.
    for asset_path in existing_assets:
        asset = unreal.EditorAssetLibrary.load_asset(asset_path)
        exist_asset_name = unreal.SystemLibrary.get_object_name(asset)
        if exist_asset_name.startswith(base_name):
            try:
                # Extract the trailing version number and keep the maximum.
                version_str = exist_asset_name.split("_")[-1]
                asset_counter = int(version_str)
                counter = max(counter, asset_counter + 1)
            except ValueError:
                # Suffix was not numeric — ignore this asset.
                continue
    # Compose the final asset name.
    asset_name = f"{base_name}_{counter - 1:02d}"
    asset_path = f"{asset_directory}/{asset_name}"
    # If the asset already exists, ask the user and bump the counter.
    if unreal.EditorAssetLibrary.does_asset_exist(asset_path):
        exist_message = unreal.EditorDialog.show_message(
            message_type=unreal.AppMsgType.YES_NO,
            title="๊ฒฝ๊ณ ",
            message=f"{base_name}_{counter - 1:02d} : ์ํ์ค๊ฐ ์ด๋ฏธ ์กด์ฌํฉ๋๋ค.\n{base_name}_{counter:02d} : ์ด๋ฆ์ผ๋ก ์์ฑํ์๊ฒ ์ต๋๊น?\n์๋์๋ฅผ ์ ํํ๋ฉด ์์์ด ์ทจ์๋ฉ๋๋ค."
        )
        if exist_message == unreal.AppReturnType.YES:
            asset_name = f"{base_name}_{counter:02d}"
        else:
            # Sentinel understood by the caller as "user cancelled".
            asset_name = "break"
    return asset_name
def create_shot(ProjectSelected, ProjectName_CheckBox, LIT_CheckBox, FX_CheckBox, ANI_CheckBox):
    """
    Create per-part sub-sequences for the focused level sequence.

    For every selected part (LIT/FX/ANI) a new LevelSequence asset is
    created under `<shot folder>/Subsequence/<part>` and hooked into the
    focused (main) sequence as a sub-track section spanning the full
    playback range. For the LIT part, lighting actors are additionally
    linked into the new sub-sequence.

    :param ProjectSelected: project name chosen in the project combo box
    :param ProjectName_CheckBox: "true"/"false" — include the project name
        in asset names
    :param LIT_CheckBox: "true"/"false" — create the LIT sub-sequence
    :param FX_CheckBox: "true"/"false" — create the FX sub-sequence
    :param ANI_CheckBox: "true"/"false" — create the ANI sub-sequence
    """
    # Project name selected in the project combo box.
    game_name = ProjectSelected
    print(f"game_name : {game_name}")
    # Make sure at least one part was selected.
    if LIT_CheckBox == "false" and FX_CheckBox == "false" and ANI_CheckBox == "false":
        unreal.EditorDialog.show_message(
            message_type=unreal.AppMsgType.OK,
            title="๊ฒฝ๊ณ ",
            message="์ต์ ํ๋์ ํํธ๋ฅผ ์ ํํด์ฃผ์ธ์."
        )
        return
    # Map each part to whether its checkbox is ticked.
    checkBox_dict = {
        "LIT": str_to_bool(LIT_CheckBox),
        "FX": str_to_bool(FX_CheckBox),
        "ANI": str_to_bool(ANI_CheckBox)
    }
    partList = ["LIT", "FX", "ANI"]
    partSelected = []
    for part in partList:
        if checkBox_dict[part]:
            partSelected.append(part)
    print(f"partSelected : {partSelected}")
    # Use the currently focused level sequence as the shot.
    focused_level_sequence = unreal.LevelSequenceEditorBlueprintLibrary.get_focused_level_sequence()
    shot_name = focused_level_sequence.get_name()
    print(f"shot_name : {shot_name}")
    shot_path = focused_level_sequence.get_path_name()
    shot_package_path = "/".join(shot_path.split("/")[:-1])  #/project/
    print(f"shot_package_path : {shot_package_path}")
    # The parent folder name doubles as the project name.
    project_name = shot_package_path.split("/")[-2]
    print(f"project_name : {project_name}")
    for part in partSelected:
        sub_package_path = shot_package_path + f"/Subsequence/{part}"
        asset_name = get_unique_asset_name(part, shot_name, project_name, ProjectName_CheckBox, sub_package_path)
        # Create the sub-sequence asset.
        asset_tools = unreal.AssetToolsHelpers.get_asset_tools()
        if asset_name == "break":
            # User cancelled in the naming dialog — stop creating parts.
            break
        else:
            sub_sequence_asset = asset_tools.create_asset(
                asset_name=asset_name,
                package_path=sub_package_path,
                asset_class=unreal.LevelSequence,
                factory=unreal.LevelSequenceFactoryNew()
            )
            # Load the main level sequence.
            main_sequence_path = shot_path
            print(f"main_sequence_path : {main_sequence_path}")
            main_sequence = unreal.load_asset(main_sequence_path, unreal.LevelSequence)
            print(f"main_sequence : {main_sequence}")
            # Add a sub-sequence track to the main sequence.
            sub_track = main_sequence.add_track(unreal.MovieSceneSubTrack)
            # Fetch the playback start and end frames.
            playback_range = main_sequence.get_playback_range()
            print(f"playback_range : {playback_range}")
            playback_start = playback_range.inclusive_start
            playback_end = playback_range.exclusive_end
            # Add and configure the sub-sequence section.
            sub_section = sub_track.add_section()
            sub_section.set_sequence(sub_sequence_asset)
            sub_section.set_range(playback_start, playback_end)  # span the full playback range
            # For LIT, also link light actors into the new sub-sequence.
            if part == "LIT":
                LIT_sub_sequence_path = sub_sequence_asset.get_path_name()
                link_light_actors_to_sequence(LIT_sub_sequence_path)
|
# Copyright (c) <2021> Side Effects Software Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The name of Side Effects Software may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY SIDE EFFECTS SOFTWARE "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL SIDE EFFECTS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" An example script that uses the API and
HoudiniEngineV2.asyncprocessor.ProcessHDA. ProcessHDA is configured with the
asset to instantiate, as well as 2 inputs: a geometry input (a cube) and a
curve input (a helix).
ProcessHDA is then activated, upon which the asset will be instantiated,
inputs set, and cooked. The ProcessHDA class's on_post_processing() function is
overridden to fetch the input structure and logged. The other state/phase
functions (on_pre_instantiate(), on_post_instantiate() etc) are overridden to
simply log the function name, in order to observe progress in the log.
"""
import math
import unreal
from HoudiniEngineV2.asyncprocessor import ProcessHDA
_g_processor = None
class ProcessHDAExample(ProcessHDA):
    """
    ProcessHDA subclass that logs each processing phase and, after
    processing, fetches the asset's node and parameter inputs and dumps
    their settings to the log.
    """

    @staticmethod
    def _print_api_input(in_input):
        """Log the settings and input objects of a single API input wrapper."""
        print('\t\tInput type: {0}'.format(in_input.__class__))
        print('\t\tbKeepWorldTransform: {0}'.format(in_input.keep_world_transform))
        print('\t\tbImportAsReference: {0}'.format(in_input.import_as_reference))
        # Geometry and curve inputs expose different option sets.
        if isinstance(in_input, unreal.HoudiniPublicAPIGeoInput):
            print('\t\tbPackBeforeMerge: {0}'.format(in_input.pack_before_merge))
            print('\t\tbExportLODs: {0}'.format(in_input.export_lo_ds))
            print('\t\tbExportSockets: {0}'.format(in_input.export_sockets))
            print('\t\tbExportColliders: {0}'.format(in_input.export_colliders))
        elif isinstance(in_input, unreal.HoudiniPublicAPICurveInput):
            print('\t\tbCookOnCurveChanged: {0}'.format(in_input.cook_on_curve_changed))
            print('\t\tbAddRotAndScaleAttributesOnCurves: {0}'.format(in_input.add_rot_and_scale_attributes_on_curves))
        input_objects = in_input.get_input_objects()
        if not input_objects:
            print('\t\tEmpty input!')
        else:
            print('\t\tNumber of objects in input: {0}'.format(len(input_objects)))
            for idx, input_object in enumerate(input_objects):
                print('\t\t\tInput object #{0}: {1}'.format(idx, input_object))
                if hasattr(in_input, 'supports_transform_offset') and in_input.supports_transform_offset():
                    print('\t\t\tObject Transform Offset: {0}'.format(in_input.get_input_object_transform_offset(idx)))
                # Curve input objects carry extra curve settings worth logging.
                if isinstance(input_object, unreal.HoudiniPublicAPICurveInputObject):
                    print('\t\t\tbClosed: {0}'.format(input_object.is_closed()))
                    print('\t\t\tCurveMethod: {0}'.format(input_object.get_curve_method()))
                    print('\t\t\tCurveType: {0}'.format(input_object.get_curve_type()))
                    print('\t\t\tReversed: {0}'.format(input_object.is_reversed()))
                    print('\t\t\tCurvePoints: {0}'.format(input_object.get_curve_points()))

    def on_failure(self):
        """Processing failed: log and drop the module-level reference."""
        print('on_failure')
        global _g_processor
        _g_processor = None

    def on_complete(self):
        """Processing finished: log and drop the module-level reference."""
        print('on_complete')
        global _g_processor
        _g_processor = None

    def on_pre_instantiation(self):
        # Phase logging only.
        print('on_pre_instantiation')

    def on_post_instantiation(self):
        # Phase logging only.
        print('on_post_instantiation')

    def on_post_auto_cook(self, cook_success):
        # Phase logging only.
        print('on_post_auto_cook, success = {0}'.format(cook_success))

    def on_pre_process(self):
        # Phase logging only.
        print('on_pre_process')

    def on_post_processing(self):
        """After processing: fetch and log all node and parameter inputs."""
        print('on_post_processing')
        # Fetch inputs, iterate over it and log
        node_inputs = self.asset_wrapper.get_inputs_at_indices()
        parm_inputs = self.asset_wrapper.get_input_parameters()
        if not node_inputs:
            print('No node inputs found!')
        else:
            print('Number of node inputs: {0}'.format(len(node_inputs)))
            for input_index, input_wrapper in node_inputs.items():
                print('\tInput index: {0}'.format(input_index))
                self._print_api_input(input_wrapper)
        if not parm_inputs:
            print('No parameter inputs found!')
        else:
            print('Number of parameter inputs: {0}'.format(len(parm_inputs)))
            for parm_name, input_wrapper in parm_inputs.items():
                print('\tInput parameter name: {0}'.format(parm_name))
                self._print_api_input(input_wrapper)

    def on_post_auto_bake(self, bake_success):
        # Phase logging only. Fixed log typo: "succes" -> "success"
        # (now consistent with on_post_auto_cook).
        print('on_post_auto_bake, success = {0}'.format(bake_success))
def get_test_hda_path():
    """Return the object path of the example HDA asset."""
    hda_object_path = '/project/.copy_to_curve_1_0'
    return hda_object_path
def get_test_hda():
    """Load and return the example HDA object."""
    return unreal.load_object(None, get_test_hda_path())
def get_geo_asset_path():
    """Return the object path of the cube geometry asset."""
    cube_object_path = '/project/.Cube'
    return cube_object_path
def get_geo_asset():
    """Load and return the cube geometry asset."""
    return unreal.load_object(None, get_geo_asset_path())
def build_inputs():
    """
    Build the node-input dictionary for the example HDA.

    Index 0 holds a geometry input (the cube asset); index 1 holds a
    curve input containing a 100-point NURBS helix.
    """
    print('configure_inputs')
    # Get the API singleton.
    api = unreal.HoudiniPublicAPIBlueprintLib.get_api()
    # --- node input 0: geometry (cube asset) ---
    geo_input = api.create_empty_input(unreal.HoudiniPublicAPIGeoInput)
    geo_input.set_input_objects((get_geo_asset(), ))
    # --- node input 1: curve (helix) ---
    curve_input = api.create_empty_input(unreal.HoudiniPublicAPICurveInput)
    # Wrap the curve input in a helper object and make it a NURBS curve.
    curve_wrapper = unreal.HoudiniPublicAPICurveInputObject(curve_input)
    curve_wrapper.set_curve_type(unreal.HoudiniPublicAPICurveType.NURBS)
    # Helix of 100 points: 20 points per revolution, radius 100, rising
    # one unit per point.
    helix_points = []
    for index in range(100):
        angle = index / 20.0 * math.pi * 2.0
        helix_points.append(
            unreal.Transform(
                [100.0 * math.cos(angle), 100.0 * math.sin(angle), index],
                [0, 0, 0],
                [1, 1, 1],
            )
        )
    curve_wrapper.set_curve_points(helix_points)
    # Set the curve wrapper as the curve input's object.
    curve_input.set_input_objects((curve_wrapper, ))
    return {0: geo_input, 1: curve_input}
def run():
    """Create the processor with preconfigured inputs and activate it."""
    global _g_processor
    _g_processor = ProcessHDAExample(
        get_test_hda(), node_inputs=build_inputs())
    # Activation kicks off instantiation, input assignment and the cook.
    if _g_processor.activate():
        unreal.log('Activated!')
    else:
        unreal.log_warning('Activation failed.')
# Allow running this example directly (e.g. from the editor's Python console).
if __name__ == '__main__':
    run()
|
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
import unreal
from Utilities.Utils import Singleton
import random
if sys.platform == "darwin":
import webbrowser
class ChameleonGallery(metaclass=Singleton):
    """
    Controller for the Chameleon "Gallery" example tool.

    Wraps the ChameleonData instance found via `jsonPath` and implements
    the callbacks referenced by the gallery's JSON UI definition.
    """

    def __init__(self, jsonPath):
        # Path of this tool's JSON definition; used for all window-level calls.
        self.jsonPath = jsonPath
        self.data = unreal.PythonBPLib.get_chameleon_data(self.jsonPath)
        # Widget "aka" names declared in the JSON UI definition.
        self.ui_scrollbox = "ScrollBox"
        self.ui_crumbname = "SBreadcrumbTrailA"
        self.ui_image = "SImageA"
        self.ui_image_local = "SImage_ImageFromRelativePath"
        self.ui_imageB = "SImage_ImageFromPath"
        self.ui_progressBar = "ProgressBarA"
        self.ui_drop_target_text_box = "DropResultBox"
        self.ui_python_not_ready = "IsPythonReadyImg"
        self.ui_python_is_ready = "IsPythonReadyImgB"
        self.ui_is_python_ready_text = "IsPythonReadyText"
        self.ui_details_view = "DetailsView"
        # Flip-flop flags used to alternate the two demo images.
        self.imageFlagA = 0
        self.imageFlagB = 0
        # set data in init
        self.set_random_image_data()
        self.data.set_combo_box_items('CombBoxA', ['1', '3', '5'])
        self.data.set_object(self.ui_details_view, self.data)
        print("ChameleonGallery.Init")

    def mark_python_ready(self):
        """Switch the 'python ready' indicator widgets to the ready state."""
        print("mark_python_ready call")
        self.data.set_visibility(self.ui_python_not_ready, "Collapsed")
        self.data.set_visibility(self.ui_python_is_ready, "Visible")
        self.data.set_text(self.ui_is_python_ready_text, "Python Path Ready.")

    def push_breadcrumb(self):
        """Append the next demo word to the breadcrumb trail widget."""
        count = self.data.get_breadcrumbs_count_string(self.ui_crumbname)
        strs = "is breadcrumb tail from alice in wonder world"
        # Cycle through the words based on the current crumb count.
        label = strs.split()[count % len(strs.split())]
        self.data.push_breadcrumb_string(self.ui_crumbname, label, label)

    def set_random_image_data(self):
        """Fill the demo image with random black/white noise pixels."""
        width = 64
        height = 64
        colors = [unreal.LinearColor(1, 1, 1, 1) if random.randint(0, 1) else unreal.LinearColor(0, 0, 0, 1) for _ in range(width * height)]
        self.data.set_image_pixels(self.ui_image, colors, width, height)

    def set_random_progress_bar_value(self):
        """Set the demo progress bar to a random percentage."""
        self.data.set_progress_bar_percent(self.ui_progressBar, random.random())

    def change_local_image(self):
        """Alternate the relative-path demo image between two files."""
        self.data.set_image_from(self.ui_image_local, ["Images/ChameleonLogo_c.png", "Images/ChameleonLogo_b.png"][self.imageFlagA])
        self.imageFlagA = (self.imageFlagA + 1) % 2

    def change_image(self):
        """Alternate the absolute-path demo image between two files."""
        self.data.set_image_from_path(self.ui_imageB, ["PythonChameleonIcon_128x.png", "Icon128.png"][self.imageFlagB])
        self.imageFlagB = (self.imageFlagB + 1) % 2

    def change_comboBox_items(self):
        """Repopulate the demo combo box with a random run of numbers."""
        offset = random.randint(1, 10)
        items = [str(v+offset) for v in range(random.randint(1, 10))]
        self.data.set_combo_box_items("CombBoxA", items)

    def launch_other_galleries(self):
        """Open the auto-generated demo galleries after user confirmation."""
        # The auto-generated galleries are optional; bail out if absent.
        if not os.path.exists(os.path.join(os.path.dirname(__file__), 'auto_gen/border_brushes_Gallery.json')):
            unreal.PythonBPLib.notification("auto-generated Galleries not exists", info_level=1)
            return
        gallery_paths = ['ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json']
        bLaunch = unreal.PythonBPLib.confirm_dialog(f'Open Other {len(gallery_paths)} Galleries? You can close them with the "Close all Gallery" Button' , "Open Other Galleries", with_cancel_button=False)
        if bLaunch:
            with unreal.ScopedSlowTask(len(gallery_paths), "Spawn Actors") as slow_task:
                slow_task.make_dialog(True)
                for i, p in enumerate(gallery_paths):
                    slow_task.enter_progress_frame(1, f"Launch Gallery: {p}")
                    unreal.ChameleonData.launch_chameleon_tool(p)

    def request_close_other_galleries(self):
        """Request that every auto-generated demo gallery closes."""
        if not os.path.exists(os.path.join(os.path.dirname(__file__), 'auto_gen/border_brushes_Gallery.json')):
            unreal.PythonBPLib.notification("auto-generated Galleries not exists", info_level=1)
            return
        gallery_paths = ['ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json',
                         'ChameleonGallery/project/.json']
        for i, p in enumerate(gallery_paths):
            unreal.ChameleonData.request_close(p)
        # unreal.ChameleonData.request_close('/project/.json')

    # NOTE(review): module-global scan for existing Singleton-based tool
    # instances. Its original indentation (class level vs. method body) is
    # ambiguous in the source — kept at class level; confirm against the
    # original file.
    exists_tools_var = [globals()[x] for x in globals() if "Utilities.Utils.Singleton" in str(type(type(globals()[x])))]

    def on_drop(self, assets, assets_folders, actors, ):
        """Show the dropped assets/folders/actors in the drop-target text box."""
        str_for_show = ""
        for items, name in zip([assets, assets_folders, actors], ["Assets:", "Assets Folders:", "Actors:"]):
            if items:
                str_for_show += f"{name}\n"
                for item in items:
                    str_for_show += f"\t{item}\n"
        self.data.set_text(self.ui_drop_target_text_box, str_for_show)
        print(f"str_for_show: {str_for_show}")

    def on_drop_func(self, *args, **kwargs):
        """Generic drop handler: dump all keyword payloads to the text box."""
        print(f"args: {args}")
        print(f"kwargs: {kwargs}")
        str_for_show = ""
        for name, items in kwargs.items():
            if items:
                str_for_show += f"{name}:\n"
                for item in items:
                    str_for_show += f"\t{item}\n"
        self.data.set_text(self.ui_drop_target_text_box, str_for_show)

    def get_full_size_of_this_chameleon(self):
        """
        Estimate the window size needed to show the whole scroll box.

        :return: (width, height) where height covers the full scroll content
        """
        current_size = unreal.ChameleonData.get_chameleon_window_size(self.jsonPath)
        scrollbox_offsets = self.data.get_scroll_box_offsets(self.ui_scrollbox)
        # Extrapolate the full content height from the scroll offset and
        # visible fraction.
        height_full = scrollbox_offsets["ScrollOffsetOfEnd"] / (1.0-scrollbox_offsets["viewFraction"])
        # Extra padding for window chrome (empirical).
        height_full += 48
        print(f"delta: {height_full} - {round(height_full)}")
        return current_size.x, round(height_full)

    def on_button_ChangeTabSize_click(self, offset_pixel):
        """Widen (or narrow) this tool's window by `offset_pixel` pixels."""
        current_size = unreal.ChameleonData.get_chameleon_window_size(self.jsonPath)
        print(f"currentSize: {current_size}")
        offsets = self.data.get_scroll_box_offsets(self.ui_scrollbox)
        print(offsets)
        if current_size:
            current_size.x += offset_pixel
            # Use self.jsonPath instead of the previously hard-coded
            # "ChameleonGallery/ChameleonGallery.json", consistent with the
            # other window-level calls in this class.
            unreal.ChameleonData.set_chameleon_window_size(self.jsonPath, current_size)

    def on_button_FlashWindow_click(self):
        """Flash this tool's window to draw the user's attention."""
        # Use self.jsonPath instead of a hard-coded tool path (see above).
        unreal.ChameleonData.flash_chameleon_window(self.jsonPath)

    def on_button_Snapshot_click(self):
        """Save a snapshot image of the full (unscrolled) tool window."""
        full_size = self.get_full_size_of_this_chameleon()
        print(f"try save snapshot @ {full_size}")
        saved_file_path = unreal.ChameleonData.snapshot_chameleon_window(self.jsonPath, unreal.Vector2D(*full_size))
        if saved_file_path:
            # Notification hyperlink opens the containing folder.
            unreal.PythonBPLib.notification(f"UI Snapshot Saved:", hyperlink_text = saved_file_path
                                            , on_hyperlink_click_command = f'chameleon_gallery.explorer("{saved_file_path}")')
        else:
            unreal.PythonBPLib.notification(f"Save UI snapshot failed.", info_level = 1)

    def explorer(self, file_path):
        """Reveal `file_path`'s folder in Finder (macOS) or Explorer (Windows)."""
        if sys.platform == "darwin":
            webbrowser.open(os.path.dirname(file_path))
        else:
            file_path = file_path.replace("/", "\\")
            # NOTE(review): shell-style string invocation of explorer; the
            # path comes from the engine, not untrusted input.
            subprocess.call('explorer "{}" '.format(os.path.dirname(file_path)))

    def set_selected_actor_to_details_view(self):
        """Show the first selected level actor in the details-view widget."""
        selected = unreal.get_editor_subsystem(unreal.EditorActorSubsystem).get_selected_level_actors()
        if selected:
            self.data.set_object(self.ui_details_view, selected[0])
        else:
            print("Selected None")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.