I have the drawBtn to draw sphere on the plane , then i have the lineNode , that continuously updates the line position according to the user camera so that they might know where they want to add their new sphere to create a shape.
class ViewController: UIViewController , ARSCNViewDelegate, ARCoachingOverlayViewDelegate, ARSessionDelegate{
@IBOutlet weak var imageview: UIImageView!
@IBOutlet weak var sceneView: ARSCNView!
var nodes: [SCNNode] = []
var linenodes : [SCNNode] = []
var startingNode : SCNNode!
var lineNode : LineNode!
let coachingOverlay = ARCoachingOverlayView()
var isEditingEnabled = true
var session: ARSession {
return sceneView.session
}
// MARK: - View Didload
override func viewDidLoad() {
super.viewDidLoad()
//Setup an AR SceneView Session
sceneView.delegate = self
sceneView.session.delegate = self
let Config = ARWorldTrackingConfiguration()
Config.planeDetection = [.horizontal, .vertical]
sceneView.session.run(Config, options: [.resetTracking, .removeExistingAnchors])
let tapgesture = UITapGestureRecognizer(target: self , action: #selector(handleTap))
imageview.addGestureRecognizer(tapgesture)
imageview.isUserInteractionEnabled = true
}
The Draw btn handles the raycast result and add the nodes to nodes Array , // MARK: - Draw the spheres to create a Shape
@IBAction func DrawBtn(_ sender: UIButton) {
let screenPoint = CGPoint(x: sceneView.bounds.midX, y: sceneView.bounds.midY)
guard let raycastQuery = sceneView.raycastQuery(from: screenPoint, allowing: .estimatedPlane, alignment: .any) else {return}
let raycastResults = sceneView.session.raycast(raycastQuery)
guard let firstResult = raycastResults.first else { return }
let position = SCNVector3.positionFrom(matrix: firstResult.worldTransform)
let sphere = SCNSphere(color: .systemBlue, radius: 0.01)
let node = SCNNode(geometry: sphere)
node.position = position
let lastNode = nodes.last
sceneView.scene.rootNode.addChildNode(node)
// Add the Sphere to the list.
nodes.append(node)
// Setting our starting point for drawing a line in real time
self.startingNode = nodes.last
if lastNode != nil {
// If there are 2 nodes or more
if nodes.count >= 2 {
// Create a node line between the nodes
let LineBetweenNodes = LineNode(from: (lastNode?.position)!, to: node.position, lineColor: UIColor.systemBlue)
// Add the Node to the scene.
sceneView.scene.rootNode.addChildNode(LineBetweenNodes)
}
}
}
then we have the imageView that the user can assign to the drawn path ,
// MARK: - handle Tap on ImageView
@objc func handleTap( _ sender : UITapGestureRecognizer){
if startingNode === nodes.last {
// Create a line segment from the last node to the first node
if let firstNode = nodes.first {
let LineBetweenNodes = LineNode(from: startingNode.position, to: firstNode.position, lineColor: UIColor.systemBlue)
sceneView.scene.rootNode.addChildNode(LineBetweenNodes)
//Add Image to the Closed Path
AddImage()
}
}
}
this is where we have the problem. The path is drawn correctly , it shows the 3d values correctly but it is not showing the image at the desired shape.
// MARK: - Add Image function
func AddImage(){
isEditingEnabled = false
let path = UIBezierPath()
// Iterate through the sphere nodes array
for (index, nodes) in nodes.enumerated() {
let position = nodes.position
if index == 0 {
// Move to the starting position
path.move(to: CGPoint(x: CGFloat(position.x), y: CGFloat(position.y)))
} else {
// Add a line segment to the next sphere node
path.addLine(to: CGPoint(x: CGFloat(position.x), y: CGFloat(position.y)))
}
}
path.close()
let shape = SCNShape(path: path, extrusionDepth: 0.01)
shape.firstMaterial?.diffuse.contents = UIImage(named: "grass1")
let shapeNode = SCNNode(geometry: shape)
shapeNode.position.z = -0.2
sceneView.scene.rootNode.addChildNode(shapeNode)
}
func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
DispatchQueue.main.async { [self] in
// get current hit position
// and check if start-node is available
if isEditingEnabled {
guard let currentPosition = self.doHitTestOnExistingPlanes(),
let start = self.startingNode else {
return
}
self.lineNode?.removeFromParentNode()
self.lineNode = LineNode(from: start.position, to: currentPosition, lineColor: UIColor.systemBlue)
self.sceneView.scene.rootNode.addChildNode(self.lineNode!)
}
else {
self.lineNode?.removeFromParentNode()
}
}
}
func doHitTestOnExistingPlanes() -> SCNVector3? {
let screenPoint = CGPoint(x: sceneView.bounds.midX, y: sceneView.bounds.midY)
guard let raycastQuery = sceneView.raycastQuery(from: screenPoint, allowing: .estimatedPlane, alignment: .any) else {return nil }
let raycastResults = sceneView.session.raycast(raycastQuery)
guard let firstResult = raycastResults.first else { return nil }
// get vector from transform
let hitPos = SCNVector3.positionFrom(matrix: firstResult.worldTransform)
return hitPos
}
}
Extra class to Add line between the nodes
class LineNode: SCNNode{
init(from vectorA: SCNVector3, to vectorB: SCNVector3, lineColor color: UIColor) {
super.init()
let height = self.distance(from: vectorA, to: vectorB)
self.position = vectorA
let nodeVector2 = SCNNode()
nodeVector2.position = vectorB
let nodeZAlign = SCNNode()
nodeZAlign.eulerAngles.x = Float.pi/2
let box = SCNBox(width: 0.003, height: height, length: 0.001, chamferRadius: 0)
let material = SCNMaterial()
material.diffuse.contents = color
box.materials = [material]
let nodeLine = SCNNode(geometry: box)
nodeLine.position.y = Float(-height/2) + 0.001
nodeZAlign.addChildNode(nodeLine)
self.addChildNode(nodeZAlign)
self.constraints = [SCNLookAtConstraint(target: nodeVector2)]
}
required init?(coder aDecoder: NSCoder) {
super.init(coder: aDecoder)
}
func distance(from vectorA: SCNVector3, to vectorB: SCNVector3)-> CGFloat
{
return CGFloat(sqrt((vectorA.x - vectorB.x) * (vectorA.x - vectorB.x) + (vectorA.y - vectorB.y) * (vectorA.y - vectorB.y) + (vectorA.z - vectorB.z) * (vectorA.z - vectorB.z)))
}
}
extensions used in my Code.
extension SCNSphere {
convenience init(color: UIColor, radius: CGFloat) {
self.init(radius: radius)
let material = SCNMaterial()
material.diffuse.contents = color
materials = [material]
}
}
extension SCNVector3 {
static func positionFrom(matrix: matrix_float4x4) -> SCNVector3 {
let column = matrix.columns.3
return SCNVector3(column.x, column.y, column.z)
}
}
One new Approach : if we add an anchor with each sphereNode , and join them together , it is giving us a shape but not showing the image :
if anchorsList.count >= 3 {
// Create a custom geometry connecting the anchor positions
var vertices = anchorsList.map { anchor in
SCNVector3(anchor.transform.columns.3.x, anchor.transform.columns.3.y, anchor.transform.columns.3.z)
}
// Close the shape by repeating the first vertex at the end
vertices.append(vertices[0])
let source = SCNGeometrySource(vertices: vertices)
var indices: [UInt32] = []
// Create indices for shape fan
for i in 1..<(vertices.count - 1) {
indices.append(0)
indices.append(UInt32(i))
indices.append(UInt32(i + 1))
}
let element = SCNGeometryElement(indices: indices, primitiveType: .triangles)
let geometry = SCNGeometry(sources: [source], elements: [element])
// Add material to the geometry
let material = SCNMaterial()
let image = UIImage(named: "grass1")
material.diffuse.contents = UIColor.red
material.isDoubleSided = true
geometry.materials = [material]
material.diffuse.contentsTransform = SCNMatrix4MakeScale(2, 2, 1)
let node = SCNNode(geometry: geometry)
sceneView.allowsCameraControl = true
// node.eulerAngles = SCNVector3(0,0,0)
sceneView.scene.rootNode.addChildNode(node)
I think you need to spend some time learning about and understanding 3D geometry...
The points you are generating for your bezier path exist in 3D-space - so the node / object you generate won't be a simple "fill in the path."
The positioning / sizing / orientation / etc will also be dependent on the orientation of the device when the AR view is initialized ... that is, if you launch the app with your device sitting flat on the desk and then pick up the device and point it, your "world coordinates" will be very different than if you launch the app while holding the device vertically and/or pointed a different direction.
Anyway...
If I make a couple changes to your
AddImage()func, including:position.zinstead ofposition.yThis edited func:
gives me these results...
After tapping Draw button 3 times:
After calling
AddImage():Rotating device after node has been created / added:
Using a couple more "draw" points:
Edit 1
Part of the issue with positioning and orienting the "image node" is that, once again, we're dealing with 3-Dimensional Space.
For example, if the user has placed nodes all on the same Y-plane:
it is fairly straightforward to create the shape and then rotate and position it:
However, if the Y-coordinates are NOT on the same plane, like this:
What to do? Try to create a node/object like this?
Or, do you want to place the image node at the first node's Y-coordinate?
In that case, the first node placed was the highest node...
So, one option might be to take the average of the Y-Coordinates:
If this video - https://www.youtube.com/watch?v=w2KhX8ARLO8 - looks close to what you're going for, I can update this answer with some additional code.
Edit 2
Assuming we have these materials setup as class properties:
and, assuming we've generated a valid array of nodes, your
AddImagefunc might look like this:Worth noting: it will be very easy to define a set of nodes that will give unexpected results.
For example, if the device is held vertically and we add nodes on a wall, the y-coordinates will vary greatly while the z-coordinates will have only a small variation -- resulting in a very small "shape" node.
But, making sure the user defines a valid array of nodes is outside the scope of this question.
I put up a sample project here: https://github.com/DonMag/ARSample
It shows several "preset" shapes... allows user-defined nodes... optionally animates the "shape node" rotation... and some other options.